Project import generated by Copybara.
GitOrigin-RevId: f7ea1bca60e4f562866b25311fb12d178513d252
diff --git a/device.bazelrc b/device.bazelrc
index c678743..d52a4df 100644
--- a/device.bazelrc
+++ b/device.bazelrc
@@ -1,6 +1,6 @@
build:download_gki --use_prebuilt_gki
build:download_gki --use_signed_prebuilts
-build:download_gki --action_env=KLEAF_DOWNLOAD_BUILD_NUMBER_MAP="gki_prebuilts=12637676"
+build:download_gki --action_env=KLEAF_DOWNLOAD_BUILD_NUMBER_MAP="gki_prebuilts=12916019"
build:no_download_gki --use_prebuilt_gki=false
# disable GKI prebuilts by default
diff --git a/drivers/gpu/img-rogue/23.2/build/linux/config/core.mk b/drivers/gpu/img-rogue/23.2/build/linux/config/core.mk
index 5460ca7..7b7b8ab 100644
--- a/drivers/gpu/img-rogue/23.2/build/linux/config/core.mk
+++ b/drivers/gpu/img-rogue/23.2/build/linux/config/core.mk
@@ -2248,6 +2248,27 @@
$(eval $(call TunableBothConfigC,SUPPORT_PMR_DEFERRED_FREE,1,\
Free device mapped PMRs asynchronously from KMD pvr_defer_free thread._\
))
+
+ $(eval $(call TunableBothConfigC,SUPPORT_PMR_PAGES_DEFERRED_FREE,1,\
+ Support deferred freeing of PMR pages freed as a result of PMR_ChangeSparseMem._\
+ ))
+
+ ifeq ($(SUPPORT_PMR_PAGES_DEFERRED_FREE),1)
+ ifeq ($(SUPPORT_PMR_DEFERRED_FREE),0)
+ $(error SUPPORT_PMR_PAGES_DEFERRED_FREE requires SUPPORT_PMR_DEFERRED_FREE)
+ endif
+ endif
+
+ $(eval $(call TunableBothConfigC,SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE,1,\
+ Support deferred freeing of PMRs used by multiple devices._\
+ ))
+
+ ifeq ($(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE),1)
+ ifeq ($(SUPPORT_PMR_DEFERRED_FREE),0)
+ $(error SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE requires SUPPORT_PMR_DEFERRED_FREE)
+ endif
+ endif
+
$(eval $(call TunableBothConfigC,SUPPORT_MMU_DEFERRED_FREE,1,\
Free MMU mappings asynchronously from KMD pvr_defer_free thread._\
))
diff --git a/drivers/gpu/img-rogue/23.2/build/linux/config/core_volcanic.mk b/drivers/gpu/img-rogue/23.2/build/linux/config/core_volcanic.mk
index c711f13..6db6dbd 100644
--- a/drivers/gpu/img-rogue/23.2/build/linux/config/core_volcanic.mk
+++ b/drivers/gpu/img-rogue/23.2/build/linux/config/core_volcanic.mk
@@ -2089,6 +2089,17 @@
$(eval $(call TunableBothConfigC,SUPPORT_PMR_DEFERRED_FREE,1,\
Free device mapped PMRs asynchronously from KMD pvr_defer_free thread._\
))
+
+ $(eval $(call TunableBothConfigC,SUPPORT_PMR_PAGES_DEFERRED_FREE,1,\
+ Support deferred freeing of PMR pages freed as a result of PMR_ChangeSparseMem._\
+ ))
+
+ ifeq ($(SUPPORT_PMR_PAGES_DEFERRED_FREE),1)
+ ifeq ($(SUPPORT_PMR_DEFERRED_FREE),0)
+ $(error SUPPORT_PMR_PAGES_DEFERRED_FREE requires SUPPORT_PMR_DEFERRED_FREE)
+ endif
+ endif
+
$(eval $(call TunableBothConfigC,SUPPORT_MMU_DEFERRED_FREE,1,\
Free MMU mappings asynchronously from KMD pvr_defer_free thread._\
))
diff --git a/drivers/gpu/img-rogue/23.2/include/img_types.h b/drivers/gpu/img-rogue/23.2/include/img_types.h
index 7b4eb64..ed852ea 100644
--- a/drivers/gpu/img-rogue/23.2/include/img_types.h
+++ b/drivers/gpu/img-rogue/23.2/include/img_types.h
@@ -256,6 +256,14 @@ typedef int IMG_OS_CONNECTION;
#define IMG_DEVMEM_ALIGN_FMTSPEC "0x%010" IMG_UINT64_FMTSPECX
#define IMG_DEVMEM_OFFSET_FMTSPEC "0x%010" IMG_UINT64_FMTSPECX
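+
+/* Format specifier for kernel pointers: on release Linux builds use %pK so
+ * printed addresses honour the kptr_restrict sysctl and are not leaked. */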
+#if defined(__KERNEL__)
+ #if !defined(DEBUG) && defined(__linux__)
+ #define IMG_KM_PTR_FMTSPEC "%pK"
+ #else
+ #define IMG_KM_PTR_FMTSPEC "%p"
+ #endif
+#endif
+
/* cpu physical address */
typedef struct
{
diff --git a/drivers/gpu/img-rogue/23.2/services/server/common/connection_server.c b/drivers/gpu/img-rogue/23.2/services/server/common/connection_server.c
index efb0a97..f017ba8 100644
--- a/drivers/gpu/img-rogue/23.2/services/server/common/connection_server.c
+++ b/drivers/gpu/img-rogue/23.2/services/server/common/connection_server.c
@@ -100,7 +100,7 @@ static PVRSRV_ERROR ConnectionDataDestroy(CONNECTION_DATA *psConnection)
* doesn't even call pfnReleaseData() callback.
* Process handles can potentially return RETRY hence additional check
* below. */
- eError = PVRSRVReleaseProcessHandleBase(psProcessHandleBase, psConnection->pid,
+ eError = PVRSRVReleaseProcessHandleBase(psProcessHandleBase,
ui64MaxBridgeTime);
if (PVRSRVIsRetryError(eError))
{
@@ -249,8 +249,8 @@ PVRSRV_ERROR PVRSRVCommonConnectionConnect(void **ppvPrivData, void *pvOSData)
PVRSRV_HANDLE_BASE_TYPE_CONNECTION);
PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVAllocHandleBase", failure);
- /* get process handle base (if it doesn't exist it will be allocated) */
- eError = PVRSRVAcquireProcessHandleBase(psConnection->pid, &psProcessHandleBase);
+ /* get process handle base for the current process (if it doesn't exist it will be allocated) */
+ eError = PVRSRVAcquireProcessHandleBase(&psProcessHandleBase);
PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVAcquireProcessHandleBase", failure);
/* hConnectionsLock now resides in PVRSRV_DEVICE_NODE */
diff --git a/drivers/gpu/img-rogue/23.2/services/server/common/devicemem_server.c b/drivers/gpu/img-rogue/23.2/services/server/common/devicemem_server.c
index 42c35f9..b9035b1 100644
--- a/drivers/gpu/img-rogue/23.2/services/server/common/devicemem_server.c
+++ b/drivers/gpu/img-rogue/23.2/services/server/common/devicemem_server.c
@@ -983,6 +983,13 @@ DevmemValidateFlags(PMR *psPMR, PVRSRV_MEMALLOCFLAGS_T uiMapFlags)
PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_FLAGS, ErrorReturnError);
}
+ if ((uiMapFlags & PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK) !=
+ (uiPMRFlags & PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: PMR's device specific flags don't match mapping flags.", __func__));
+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_FLAGS, ErrorReturnError);
+ }
+
ErrorReturnError:
return eError;
}
@@ -1012,6 +1019,10 @@ DevmemXIntMapPages(DEVMEMXINT_RESERVATION *psRsrv,
"mapping offset out of range", PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE);
PVR_LOG_RETURN_IF_FALSE((uiFlags & ~PVRSRV_MEMALLOCFLAGS_DEVMEMX_VIRTUAL_MASK) == 0,
"invalid flags", PVRSRV_ERROR_INVALID_FLAGS);
+ PVR_LOG_RETURN_IF_FALSE(!PMR_IsSparse(psPMR),
+ "PMR is Sparse, devmemx PMRs should be non-sparse", PVRSRV_ERROR_INVALID_FLAGS);
+ PVR_LOG_RETURN_IF_FALSE(!(PMR_Flags(psPMR) & PVRSRV_MEMALLOCFLAG_DEFER_PHYS_ALLOC),
+ "PMR allocation is deferred, devmemx PMRs can not be deferred", PVRSRV_ERROR_INVALID_FLAGS);
if (uiLog2PageSize > PMR_GetLog2Contiguity(psPMR))
{
@@ -1089,14 +1100,6 @@ DevmemXIntUnmapPages(DEVMEMXINT_RESERVATION *psRsrv,
{
if (psRsrv->ppsPMR[i] != NULL)
{
-#if defined(SUPPORT_PMR_DEFERRED_FREE)
- /* If PMR is allocated on demand the backing memory is freed by
- * pfnUnlockPhysAddresses(). */
- if (!PVRSRV_CHECK_ON_DEMAND(PMR_Flags(psRsrv->ppsPMR[i])))
- {
- PMRMarkForDeferFree(psRsrv->ppsPMR[i]);
- }
-#endif /* defined(SUPPORT_PMR_DEFERRED_FREE) */
PMRUnrefPMR2(psRsrv->ppsPMR[i]);
psRsrv->ppsPMR[i] = NULL;
}
@@ -1221,7 +1224,7 @@ DevmemIntMapPMR2(DEVMEMINT_HEAP *psDevmemHeap,
if (!DevmemIntReservationAcquireUnlocked(psReservation))
{
- PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_REFCOUNT_OVERFLOW, ErrorReturnError);
+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_REFCOUNT_OVERFLOW, ErrorReleaseResLock);
}
uiAllocationSize = psReservation->uiLength;
@@ -1232,6 +1235,13 @@ DevmemIntMapPMR2(DEVMEMINT_HEAP *psDevmemHeap,
eError = PMRLockSysPhysAddresses(psPMR);
PVR_GOTO_IF_ERROR(eError, ErrorUnreference);
+ PMRLockPMR(psPMR);
+
+ /* Increase reservation association count so we know if multiple mappings have been created
+ * on the PMR
+ */
+ PMRGpuResCountIncr(psPMR);
+
sAllocationDevVAddr = psReservation->sBase;
/*Check if the PMR that needs to be mapped is sparse */
@@ -1300,6 +1310,7 @@ DevmemIntMapPMR2(DEVMEMINT_HEAP *psDevmemHeap,
psReservation->psMappedPMR = psPMR;
+ PMRUnlockPMR(psPMR);
OSLockRelease(psReservation->hLock);
return PVRSRV_OK;
@@ -1314,7 +1325,11 @@ DevmemIntMapPMR2(DEVMEMINT_HEAP *psDevmemHeap,
0);
ErrorFreeSparseTmpBuf:
OSFreeMem(pvTmpBuf);
+
ErrorUnlockPhysAddr:
+ PMRGpuResCountDecr(psPMR);
+ PMRUnlockPMR(psPMR);
+
{
PVRSRV_ERROR eError1 = PVRSRV_OK;
eError1 = PMRUnlockSysPhysAddresses(psPMR);
@@ -1325,6 +1340,7 @@ DevmemIntMapPMR2(DEVMEMINT_HEAP *psDevmemHeap,
/* if fails there's not much to do (the function will print an error) */
DevmemIntReservationReleaseUnlocked(psReservation);
+ErrorReleaseResLock:
OSLockRelease(psReservation->hLock);
ErrorReturnError:
@@ -1444,6 +1460,8 @@ DevmemIntUnmapPMR2(DEVMEMINT_RESERVATION2 *psReservation)
sAllocationDevVAddr = psReservation->sBase;
OSLockAcquire(psReservation->hLock);
+ PMRLockPMR(psReservation->psMappedPMR);
+
bIsSparse = PMR_IsSparse(psReservation->psMappedPMR);
if (bIsSparse)
@@ -1478,14 +1496,10 @@ DevmemIntUnmapPMR2(DEVMEMINT_RESERVATION2 *psReservation)
PVR_LOG_GOTO_IF_ERROR(eError, "MMU_UnmapPMRFast", ErrUnlock);
}
-#if defined(SUPPORT_PMR_DEFERRED_FREE)
- /* If PMR is allocated on demand the backing memory is freed by
- * pfnUnlockPhysAddresses(). */
- if (!PVRSRV_CHECK_ON_DEMAND(PMR_Flags(psReservation->psMappedPMR)))
- {
- PMRMarkForDeferFree(psReservation->psMappedPMR);
- }
-#endif /* defined(SUPPORT_PMR_DEFERRED_FREE) */
+
+ PMRGpuResCountDecr(psReservation->psMappedPMR);
+
+ PMRUnlockPMR(psReservation->psMappedPMR);
eError = PMRUnlockSysPhysAddresses(psReservation->psMappedPMR);
PVR_ASSERT(eError == PVRSRV_OK);
@@ -1499,6 +1513,7 @@ DevmemIntUnmapPMR2(DEVMEMINT_RESERVATION2 *psReservation)
return PVRSRV_OK;
ErrUnlock:
+ PMRUnlockPMR(psReservation->psMappedPMR);
OSLockRelease(psReservation->hLock);
return eError;
@@ -1731,6 +1746,57 @@ DevmemIntChangeSparse(DEVMEMINT_HEAP *psDevmemHeap,
}
static PVRSRV_ERROR
+DevmemIntChangeSparseValidateParams(PMR *psPMR,
+ IMG_UINT32 ui32AllocPageCount,
+ IMG_UINT32 ui32FreePageCount,
+ IMG_UINT32 ui32LogicalChunkCount,
+ SPARSE_MEM_RESIZE_FLAGS uiSparseFlags)
+{
+ /* The request must specify an alloc operation, a free operation, or both. */
+ PVR_LOG_RETURN_IF_INVALID_PARAM(uiSparseFlags & SPARSE_RESIZE_BOTH, "uiSparseFlags");
+
+ if (!PMR_IsSparse(psPMR) || PMR_IsMemLayoutFixed(psPMR) ||
+ PMR_IsCpuMapped(psPMR))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: PMR cannot be changed because one or more of the following"
+ " were true: !PMR_IsSparse() = %s, PMR_IsMemLayoutFixed() = %s,"
+ " PMR_IsCpuMapped() = %s",
+ __func__,
+ !PMR_IsSparse(psPMR) ? "true" : "false",
+ PMR_IsMemLayoutFixed(psPMR) ? "true" : "false",
+ PMR_IsCpuMapped(psPMR) ? "true" : "false"));
+ return PVRSRV_ERROR_PMR_NOT_PERMITTED;
+ }
+
+ if (PMR_IsGpuMultiMapped(psPMR))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: PMR cannot be changed because PMR_IsGpuMultiMapped() = true",
+ __func__));
+ return PVRSRV_ERROR_PMR_NOT_PERMITTED;
+ }
+
+ if (uiSparseFlags & SPARSE_RESIZE_ALLOC)
+ {
+ PVR_LOG_RETURN_IF_INVALID_PARAM(ui32AllocPageCount != 0, "ui32AllocPageCount");
+ PVR_LOG_RETURN_IF_FALSE(ui32AllocPageCount <= ui32LogicalChunkCount,
+ "ui32AllocPageCount is invalid",
+ PVRSRV_ERROR_PMR_BAD_MAPPINGTABLE_SIZE);
+ }
+
+ if (uiSparseFlags & SPARSE_RESIZE_FREE)
+ {
+ PVR_LOG_RETURN_IF_INVALID_PARAM(ui32FreePageCount != 0, "ui32FreePageCount");
+ PVR_LOG_RETURN_IF_FALSE(ui32FreePageCount <= ui32LogicalChunkCount,
+ "ui32FreePageCount is invalid",
+ PVRSRV_ERROR_PMR_BAD_MAPPINGTABLE_SIZE);
+ }
+
+ return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR
DevmemIntValidateSparsePMRIndices(IMG_UINT32 ui32PMRLogicalChunkCount,
IMG_UINT32 *paui32LogicalIndices,
IMG_UINT32 ui32LogicalIndiceCount)
@@ -1802,8 +1868,8 @@ DevmemIntChangeSparse2(DEVMEMINT_HEAP *psDevmemHeap,
{
PVRSRV_ERROR eError = PVRSRV_OK;
- IMG_UINT32 uiLog2PMRContiguity;
- IMG_UINT32 uiLog2HeapContiguity;
+ IMG_UINT32 ui32Log2PMRContiguity;
+ IMG_UINT32 ui32Log2HeapContiguity;
IMG_UINT32 uiOrderDiff;
PVRSRV_MEMALLOCFLAGS_T uiFlags;
@@ -1815,13 +1881,14 @@ DevmemIntChangeSparse2(DEVMEMINT_HEAP *psDevmemHeap,
IMG_UINT64 ui64PMRLogicalSize;
IMG_UINT32 ui32LogicalChunkCount;
+ OSLockAcquire(psReservation->hLock);
+
+ uiFlags = psReservation->uiFlags;
+
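+ /* Ensure a PMR has been mapped to this reservation. */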
+ PVR_LOG_GOTO_IF_INVALID_PARAM(psReservation->psMappedPMR != NULL, eError, InvalidPMRErr);
+
PVR_UNREFERENCED_PARAMETER(psDevmemHeap);
- PMR_LogicalSize(psPMR, &ui64PMRLogicalSize);
- ui32LogicalChunkCount = ui64PMRLogicalSize >> PMR_GetLog2Contiguity(psPMR);
-
- /* Ensure a PMR has been mapped to this reservation. */
- PVR_LOG_RETURN_IF_INVALID_PARAM(psReservation->psMappedPMR != NULL, "psReservation");
{
IMG_UINT64 ui64PMRUID;
@@ -1835,63 +1902,38 @@ DevmemIntChangeSparse2(DEVMEMINT_HEAP *psDevmemHeap,
PVR_DPF((PVR_DBG_ERROR,
"%s: Reservation doesn't represent virtual range associated"
" with given mapped PMR", __func__));
- return PVRSRV_ERROR_INVALID_PARAMS;
+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, InvalidPMRErr);
}
}
psPMR = psReservation->psMappedPMR;
+ PMRLockPMR(psPMR);
- if (!PMR_IsSparse(psPMR))
- {
- PVR_DPF((PVR_DBG_ERROR,
- "%s: Given PMR is not Sparse",
- __func__));
- return PVRSRV_ERROR_INVALID_PARAMS;
- }
+ ui32Log2PMRContiguity = PMR_GetLog2Contiguity(psPMR);
- uiLog2PMRContiguity = PMR_GetLog2Contiguity(psPMR);
- uiLog2HeapContiguity = psReservation->psDevmemHeap->uiLog2PageSize;
+ PMR_LogicalSize(psPMR, &ui64PMRLogicalSize);
+ ui32LogicalChunkCount = ui64PMRLogicalSize >> ui32Log2PMRContiguity;
- /* This is check is made in DevmemIntMapPMR - no need to do it again in release. */
- PVR_ASSERT(uiLog2HeapContiguity <= uiLog2PMRContiguity);
+ ui32Log2HeapContiguity = psReservation->psDevmemHeap->uiLog2PageSize;
- if (uiSparseFlags & SPARSE_RESIZE_ALLOC)
- {
- PVR_LOG_RETURN_IF_INVALID_PARAM(ui32AllocPageCount != 0, "ui32AllocPageCount");
- PVR_LOG_RETURN_IF_FALSE(ui32AllocPageCount <= ui32LogicalChunkCount,
- "ui32AllocPageCount is invalid",
- PVRSRV_ERROR_PMR_BAD_MAPPINGTABLE_SIZE);
- }
+ eError = DevmemIntChangeSparseValidateParams(psPMR,
+ ui32AllocPageCount,
+ ui32FreePageCount,
+ ui32LogicalChunkCount,
+ uiSparseFlags);
+ PVR_LOG_GOTO_IF_ERROR(eError, "DevmemIntChangeSparseValidateParams", e0);
- if (uiSparseFlags & SPARSE_RESIZE_FREE)
- {
- PVR_LOG_RETURN_IF_INVALID_PARAM(ui32FreePageCount != 0, "ui32FreePageCount");
- PVR_LOG_RETURN_IF_FALSE(ui32FreePageCount <= ui32LogicalChunkCount,
- "ui32FreePageCount is invalid",
- PVRSRV_ERROR_PMR_BAD_MAPPINGTABLE_SIZE);
- }
-
- if (PMR_IsMemLayoutFixed(psPMR) || PMR_IsCpuMapped(psPMR))
- {
- PVR_DPF((PVR_DBG_ERROR,
- "%s: This PMR layout cannot be changed - PMR_IsMemLayoutFixed()=%c, _PMR_IsMapped()=%c",
- __func__,
- PMR_IsMemLayoutFixed(psPMR) ? 'Y' : 'n',
- PMR_IsCpuMapped(psPMR) ? 'Y' : 'n'));
- return PVRSRV_ERROR_PMR_NOT_PERMITTED;
- }
-
- uiFlags = psReservation->uiFlags;
eError = DevmemValidateFlags(psPMR, uiFlags);
PVR_LOG_GOTO_IF_ERROR(eError, "DevmemValidateFlags", e0);
+ /* This check is made in DevmemIntMapPMR - no need to do it again in release. */
+ PVR_ASSERT(ui32Log2HeapContiguity <= ui32Log2PMRContiguity);
+
pai32MapIndices = pai32AllocIndices;
pai32UnmapIndices = pai32FreeIndices;
uiMapPageCount = ui32AllocPageCount;
uiUnmapPageCount = ui32FreePageCount;
- OSLockAcquire(psReservation->hLock);
-
/*
* The order of steps in which this request is done is given below. The order of
* operations is very important in this case:
@@ -1929,7 +1971,7 @@ DevmemIntChangeSparse2(DEVMEMINT_HEAP *psDevmemHeap,
PVR_LOG_GOTO_IF_ERROR(eError, "DevmemIntValidateSparsePMRIndices", e0);
}
- uiOrderDiff = uiLog2PMRContiguity - uiLog2HeapContiguity;
+ uiOrderDiff = ui32Log2PMRContiguity - ui32Log2HeapContiguity;
/* Special case:
* Adjust indices if we map into a heap that uses smaller page sizes
@@ -2032,7 +2074,7 @@ DevmemIntChangeSparse2(DEVMEMINT_HEAP *psDevmemHeap,
psReservation->sBase,
uiUnmapPageCount,
pai32UnmapIndices,
- uiLog2HeapContiguity,
+ ui32Log2HeapContiguity,
uiPMRFlags);
PVR_LOG_GOTO_IF_ERROR(eError, "MMU_UnmapPages", e1);
@@ -2051,12 +2093,12 @@ DevmemIntChangeSparse2(DEVMEMINT_HEAP *psDevmemHeap,
}
/* Do the PMR specific changes */
- eError = PMR_ChangeSparseMem(psPMR,
- ui32AllocPageCount,
- pai32AllocIndices,
- ui32FreePageCount,
- pai32FreeIndices,
- uiSparseFlags);
+ eError = PMR_ChangeSparseMemUnlocked(psPMR,
+ ui32AllocPageCount,
+ pai32AllocIndices,
+ ui32FreePageCount,
+ pai32FreeIndices,
+ uiSparseFlags);
if (PVRSRV_OK != eError)
{
PVR_DPF((PVR_DBG_MESSAGE,
@@ -2077,7 +2119,7 @@ DevmemIntChangeSparse2(DEVMEMINT_HEAP *psDevmemHeap,
0,
uiMapPageCount,
pai32MapIndices,
- uiLog2HeapContiguity);
+ ui32Log2HeapContiguity);
if (PVRSRV_OK != eError)
{
PVR_DPF((PVR_DBG_MESSAGE,
@@ -2134,6 +2176,8 @@ DevmemIntChangeSparse2(DEVMEMINT_HEAP *psDevmemHeap,
OSFreeMem(pai32UnmapIndices);
}
e0:
+ PMRUnlockPMR(psPMR);
+InvalidPMRErr:
OSLockRelease(psReservation->hLock);
return eError;
}
diff --git a/drivers/gpu/img-rogue/23.2/services/server/common/handle.c b/drivers/gpu/img-rogue/23.2/services/server/common/handle.c
index 7d0b48d..3761897 100644
--- a/drivers/gpu/img-rogue/23.2/services/server/common/handle.c
+++ b/drivers/gpu/img-rogue/23.2/services/server/common/handle.c
@@ -1764,6 +1764,7 @@ PVRSRV_ERROR PVRSRVAllocHandleBase(PVRSRV_HANDLE_BASE **ppsBase,
return PVRSRV_OK;
ErrorDestroyHandleBase:
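+ /* Ensure an error is reported from this path regardless of eError's prior value. */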
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
(void)gpsHandleFuncs->pfnDestroyHandleBase(psBase->psImplBase);
ErrorUnlock:
@@ -2109,6 +2110,8 @@ PVRSRV_ERROR PVRSRVFreeKernelHandles(PVRSRV_HANDLE_BASE *psBase)
return eError;
}
+/* Only called from sync_fallback_server.c */
+#if defined(SUPPORT_FALLBACK_FENCE_SYNC)
/*!
*******************************************************************************
@Function PVRSRVRetrieveProcessHandleBase
@@ -2148,8 +2151,13 @@ PVRSRV_HANDLE_BASE *PVRSRVRetrieveProcessHandleBase(void)
/* Not being called from the cleanup thread, so return the process
* handle base for the current process.
*/
+ uintptr_t uiHashKey;
+
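+ /* Acquire the PPID resource only to obtain its hash key; release the reference straight away. */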
+ uiHashKey = OSAcquireCurrentPPIDResourceRefKM();
+ OSReleasePPIDResourceRefKM(uiHashKey);
+
psProcHandleBase = (PROCESS_HANDLE_BASE *)
- HASH_Retrieve(g_psProcessHandleBaseTable, OSGetCurrentClientProcessIDKM());
+ HASH_Retrieve(g_psProcessHandleBaseTable, uiHashKey);
}
OSLockRelease(g_hProcessHandleBaseLock);
@@ -2160,26 +2168,30 @@ PVRSRV_HANDLE_BASE *PVRSRVRetrieveProcessHandleBase(void)
}
return psHandleBase;
}
+#endif /* defined(SUPPORT_FALLBACK_FENCE_SYNC) */
/*!
*******************************************************************************
@Function PVRSRVAcquireProcessHandleBase
- @Description Increments reference count on a process handle base identified
- by uiPid and returns pointer to the base. If the handle base
- does not exist it will be allocated.
- @Inout uiPid - PID of a process
- @Output ppsBase - pointer to a handle base for the process identified by
- uiPid
+ @Description Increments reference count on the process handle base for the
+ current process and returns pointer to the base. If the handle
+ base does not exist it will be allocated.
+ @Output ppsBase - pointer to a handle base for the current process
@Return Error code or PVRSRV_OK
******************************************************************************/
-PVRSRV_ERROR PVRSRVAcquireProcessHandleBase(IMG_PID uiPid, PROCESS_HANDLE_BASE **ppsBase)
+PVRSRV_ERROR PVRSRVAcquireProcessHandleBase(PROCESS_HANDLE_BASE **ppsBase)
{
PROCESS_HANDLE_BASE *psBase;
PVRSRV_ERROR eError;
+ uintptr_t uiHashKey;
OSLockAcquire(g_hProcessHandleBaseLock);
- psBase = (PROCESS_HANDLE_BASE*) HASH_Retrieve(g_psProcessHandleBaseTable, uiPid);
+ /* Acquire the process resource hash key (and take ref) */
+ uiHashKey = OSAcquireCurrentPPIDResourceRefKM();
+ PVR_ASSERT(uiHashKey != 0);
+
+ psBase = (PROCESS_HANDLE_BASE*) HASH_Retrieve(g_psProcessHandleBaseTable, uiHashKey);
/* In case there is none we are going to allocate one */
if (psBase == NULL)
@@ -2194,7 +2206,8 @@ PVRSRV_ERROR PVRSRVAcquireProcessHandleBase(IMG_PID uiPid, PROCESS_HANDLE_BASE *
PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVAllocHandleBase", ErrorFreeProcessHandleBase);
/* Insert the handle base into the global hash table */
- bSuccess = HASH_Insert(g_psProcessHandleBaseTable, uiPid, (uintptr_t) psBase);
+ psBase->uiHashKey = uiHashKey;
+ bSuccess = HASH_Insert(g_psProcessHandleBaseTable, uiHashKey, (uintptr_t)psBase);
PVR_LOG_GOTO_IF_FALSE(bSuccess, "HASH_Insert failed", ErrorFreeHandleBase);
}
@@ -2207,10 +2220,12 @@ PVRSRV_ERROR PVRSRVAcquireProcessHandleBase(IMG_PID uiPid, PROCESS_HANDLE_BASE *
return PVRSRV_OK;
ErrorFreeHandleBase:
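+ /* Ensure an error is reported from this path regardless of eError's prior value. */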
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
PVRSRVFreeHandleBase(psBase->psHandleBase, 0);
ErrorFreeProcessHandleBase:
OSFreeMem(psBase);
ErrorUnlock:
+ OSReleasePPIDResourceRefKM(uiHashKey);
OSLockRelease(g_hProcessHandleBaseLock);
return eError;
@@ -2220,22 +2235,21 @@ PVRSRV_ERROR PVRSRVAcquireProcessHandleBase(IMG_PID uiPid, PROCESS_HANDLE_BASE *
*******************************************************************************
@Function PVRSRVReleaseProcessHandleBase
@Description Decrements reference count on a process handle base psBase
- for a process identified by uiPid. If the reference count
- reaches 0 the handle base will be freed..
+ for the current process. If the reference count reaches 0 the
+ process handle base will be freed.
@Input psBase - pointer to a process handle base
- @Inout uiPid - PID of a process
@Inout ui64MaxBridgeTime - maximum time a handle destroy operation
can hold the handle base lock (after that
time a lock will be release and reacquired
for another time slice)
@Return Error code or PVRSRV_OK
******************************************************************************/
-PVRSRV_ERROR PVRSRVReleaseProcessHandleBase(PROCESS_HANDLE_BASE *psBase, IMG_PID uiPid,
+PVRSRV_ERROR PVRSRVReleaseProcessHandleBase(PROCESS_HANDLE_BASE *psBase,
IMG_UINT64 ui64MaxBridgeTime)
{
PVRSRV_ERROR eError;
IMG_INT iRefCount;
- uintptr_t uiHashValue;
+ uintptr_t uiHashValue = 0;
OSLockAcquire(g_hProcessHandleBaseLock);
@@ -2243,6 +2257,8 @@ PVRSRV_ERROR PVRSRVReleaseProcessHandleBase(PROCESS_HANDLE_BASE *psBase, IMG_PID
if (iRefCount != 0)
{
+ /* Release the process resource hash key (drop ref) */
+ OSReleasePPIDResourceRefKM(psBase->uiHashKey);
OSLockRelease(g_hProcessHandleBaseLock);
return PVRSRV_OK;
}
@@ -2250,7 +2266,10 @@ PVRSRV_ERROR PVRSRVReleaseProcessHandleBase(PROCESS_HANDLE_BASE *psBase, IMG_PID
/* in case the refcount becomes 0 we can remove the process handle base
* and all related objects */
- uiHashValue = HASH_Remove(g_psProcessHandleBaseTable, uiPid);
+ uiHashValue = HASH_Remove(g_psProcessHandleBaseTable, psBase->uiHashKey);
+ /* Release the process resource hash key (drop ref) */
+ OSReleasePPIDResourceRefKM(psBase->uiHashKey);
+ psBase->uiHashKey = 0;
OSLockRelease(g_hProcessHandleBaseLock);
PVR_LOG_RETURN_IF_FALSE(uiHashValue != 0, "HASH_Remove failed",
diff --git a/drivers/gpu/img-rogue/23.2/services/server/common/mmu_common.c b/drivers/gpu/img-rogue/23.2/services/server/common/mmu_common.c
index 7269673..cabc077 100644
--- a/drivers/gpu/img-rogue/23.2/services/server/common/mmu_common.c
+++ b/drivers/gpu/img-rogue/23.2/services/server/common/mmu_common.c
@@ -3205,6 +3205,10 @@ MMU_MapPages(MMU_CONTEXT *psMMUContext,
}
}
+#if defined(SUPPORT_PMR_DEFERRED_FREE)
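+ /* Mapping the PMR to the device means it must later be freed via the deferred (zombie) path. */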
+ PMRMarkForDeferFree(psPMR);
+#endif /* defined(SUPPORT_PMR_DEFERRED_FREE) */
+
OSLockAcquire(psMMUContext->hLock);
for (uiLoop = 0; uiLoop < ui32MapPageCount; uiLoop++)
@@ -3892,7 +3896,7 @@ MMUX_MapVRangeToBackingPage(MMU_CONTEXT *psMMUContext,
PVRSRV_ERROR
MMU_MapPMRFast(MMU_CONTEXT *psMMUContext,
IMG_DEV_VIRTADDR sDevVAddrBase,
- const PMR *psPMR,
+ PMR *psPMR,
IMG_DEVMEM_SIZE_T uiSizeBytes,
PVRSRV_MEMALLOCFLAGS_T uiMappingFlags,
IMG_UINT32 uiLog2HeapPageSize)
@@ -3969,6 +3973,10 @@ MMU_MapPMRFast(MMU_CONTEXT *psMMUContext,
PVR_LOG_GOTO_WITH_ERROR("psConfig->uiBytesPerEntry", eError, PVRSRV_ERROR_MMU_CONFIG_IS_WRONG, put_mmu_context);
}
+#if defined(SUPPORT_PMR_DEFERRED_FREE)
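+ /* As in MMU_MapPages, a device-mapped PMR must take the deferred free path. */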
+ PMRMarkForDeferFree(psPMR);
+#endif /* defined(SUPPORT_PMR_DEFERRED_FREE) */
+
OSLockAcquire(psMMUContext->hLock);
do
diff --git a/drivers/gpu/img-rogue/23.2/services/server/common/pdump_server.c b/drivers/gpu/img-rogue/23.2/services/server/common/pdump_server.c
index 48f6dbe..45dac0f 100644
--- a/drivers/gpu/img-rogue/23.2/services/server/common/pdump_server.c
+++ b/drivers/gpu/img-rogue/23.2/services/server/common/pdump_server.c
@@ -3950,7 +3950,7 @@ PVRSRV_ERROR PDumpCommentWithFlagsVA(PVRSRV_DEVICE_NODE *psDeviceNode,
const IMG_CHAR * pszFormat, va_list args)
{
IMG_INT32 iCount;
- PVRSRV_ERROR eErr = PVRSRV_OK;
+ PVRSRV_ERROR eErr = PVRSRV_ERROR_INVALID_PARAMS;
PDUMP_GET_MSG_STRING();
/* Construct the string */
diff --git a/drivers/gpu/img-rogue/23.2/services/server/common/physheap.c b/drivers/gpu/img-rogue/23.2/services/server/common/physheap.c
index 4b57898..ff72f5e 100644
--- a/drivers/gpu/img-rogue/23.2/services/server/common/physheap.c
+++ b/drivers/gpu/img-rogue/23.2/services/server/common/physheap.c
@@ -370,7 +370,7 @@ static PVRSRV_ERROR PhysHeapCreatePropertiesString(PHYS_HEAP *psPhysHeap,
iCount = OSSNPrintf(pszPhysHeapString,
ui32Size,
- "0x%p -> PdMs: %s, Type: %s, "
+ "0x"IMG_KM_PTR_FMTSPEC" -> PdMs: %s, Type: %s, "
"CPU PA Base: " CPUPHYADDR_UINT_FMTSPEC", "
"GPU PA Base: 0x%08"IMG_UINT64_FMTSPECx", "
"Usage Flags: 0x%08x (%s), Refs: %d, "
@@ -391,7 +391,7 @@ static PVRSRV_ERROR PhysHeapCreatePropertiesString(PHYS_HEAP *psPhysHeap,
{
iCount = OSSNPrintf(pszPhysHeapString,
ui32Size,
- "0x%p -> PdMs: %s, Type: %s, "
+ "0x"IMG_KM_PTR_FMTSPEC" -> PdMs: %s, Type: %s, "
"Usage Flags: 0x%08x (%s), Refs: %d, "
"Free Size: %"IMG_UINT64_FMTSPEC"B, "
"Total Size: %"IMG_UINT64_FMTSPEC"B",
diff --git a/drivers/gpu/img-rogue/23.2/services/server/common/physmem.c b/drivers/gpu/img-rogue/23.2/services/server/common/physmem.c
index ed2f2f5..e729683 100644
--- a/drivers/gpu/img-rogue/23.2/services/server/common/physmem.c
+++ b/drivers/gpu/img-rogue/23.2/services/server/common/physmem.c
@@ -354,6 +354,16 @@ PVRSRV_ERROR PhysMemValidateParams(IMG_UINT32 ui32NumPhysChunks,
return PVRSRV_ERROR_INVALID_FLAGS;
}
+ /* Sparse allocations must be backed immediately as the requested
+ * pui32MappingTable is not retained in any structure if not immediately
+ * actioned on allocation.
+ */
+ if (PVRSRV_CHECK_ON_DEMAND(uiFlags) && bIsSparse)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid to specify ON_DEMAND for a sparse allocation: 0x%" IMG_UINT64_FMTSPECX, __func__, uiFlags));
+ return PVRSRV_ERROR_INVALID_FLAGS;
+ }
+
if (ui32NumVirtChunks == 0)
{
PVR_DPF((PVR_DBG_ERROR, "%s: Number of virtual chunks cannot be 0",
diff --git a/drivers/gpu/img-rogue/23.2/services/server/common/physmem_lma.c b/drivers/gpu/img-rogue/23.2/services/server/common/physmem_lma.c
index 4a5cf61..cbbc44b 100644
--- a/drivers/gpu/img-rogue/23.2/services/server/common/physmem_lma.c
+++ b/drivers/gpu/img-rogue/23.2/services/server/common/physmem_lma.c
@@ -162,6 +162,11 @@ typedef struct _PMR_LMALLOCARRAY_DATA_ {
} PMR_LMALLOCARRAY_DATA;
+static PVRSRV_ERROR
+_FreeLMPages(PMR_LMALLOCARRAY_DATA *psPageArrayData,
+ IMG_UINT32 *pui32FreeIndices,
+ IMG_UINT32 ui32FreeChunkCount);
+
#if defined(DEBUG) && defined(SUPPORT_VALIDATION) && defined(__linux__)
/* Global structure to manage GPU memory leak */
static DEFINE_MUTEX(g_sLMALeakMutex);
@@ -1881,6 +1886,130 @@ _FreeLMPagesSparse(PMR_LMALLOCARRAY_DATA *psPageArrayData,
return PVRSRV_OK;
}
+#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE)
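+/* pfnFreeZombiePages factory callback: frees LMA pages previously extracted
+ * into a zombie page array. */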
+static PVRSRV_ERROR PMRFreeZombiePagesRAMem(PMR_IMPL_ZOMBIEPAGES pvPriv)
+{
+ PVRSRV_ERROR eError;
+ PMR_LMALLOCARRAY_DATA *psZombiePageArray = pvPriv;
+
+ eError = _FreeLMPages(psZombiePageArray, NULL, 0);
+ PVR_GOTO_IF_ERROR(eError, e0);
+
+ _FreeLMPageArray(psZombiePageArray);
+ return PVRSRV_OK;
+e0:
+ return eError;
+}
+
+/* Allocates a new PMR_LMALLOCARRAY_DATA object and fills it with
+ * pages to be extracted from psSrcPageArrayData.
+ */
+static PVRSRV_ERROR
+_ExtractPages(PMR_LMALLOCARRAY_DATA *psSrcPageArrayData,
+ IMG_UINT32 *pai32ExtractIndices,
+ IMG_UINT32 ui32ExtractPageCount,
+ PMR_LMALLOCARRAY_DATA **psOutPageArrayData)
+{
+ PVRSRV_ERROR eError;
+ IMG_UINT32 ui32ExtractPageCountSaved;
+ PMR_LMALLOCARRAY_DATA* psDstPageArrayData;
+
+ /* Alloc PMR_LMALLOCARRAY_DATA for the extracted pages */
+ eError = _AllocLMPageArray((IMG_UINT64)ui32ExtractPageCount << psSrcPageArrayData->uiLog2ChunkSize,
+ ui32ExtractPageCount,
+ ui32ExtractPageCount,
+ psSrcPageArrayData->uiLog2ChunkSize,
+ psSrcPageArrayData->ui32Flags,
+ psSrcPageArrayData->psPhysHeap,
+ psSrcPageArrayData->uiAllocFlags,
+ psSrcPageArrayData->uiPid,
+ &psDstPageArrayData,
+ psSrcPageArrayData->psConnection);
+ PVR_LOG_GOTO_IF_ERROR(eError, "_AllocLMPageArray", alloc_error);
+
+ psDstPageArrayData->psArena = psSrcPageArrayData->psArena;
+
+ ui32ExtractPageCountSaved = ui32ExtractPageCount;
+ /* Transfer pages from source base array to newly allocated page array */
+ eError = RA_TransferMultiSparseIndices(psSrcPageArrayData->psArena,
+ psSrcPageArrayData->aBaseArray,
+ psSrcPageArrayData->uiTotalNumChunks,
+ psDstPageArrayData->aBaseArray,
+ psDstPageArrayData->uiTotalNumChunks,
+ psSrcPageArrayData->uiLog2ChunkSize,
+ pai32ExtractIndices,
+ &ui32ExtractPageCountSaved);
+ PVR_LOG_GOTO_IF_FALSE((eError == PVRSRV_OK) && (ui32ExtractPageCountSaved == ui32ExtractPageCount),
+ "RA_TransferMultiSparseIndices failed",
+ transfer_error);
+
+ /* Update page counts */
+ psSrcPageArrayData->iNumChunksAllocated -= ui32ExtractPageCount;
+ psDstPageArrayData->iNumChunksAllocated += ui32ExtractPageCount;
+
+ *psOutPageArrayData = psDstPageArrayData;
+
+ return PVRSRV_OK;
+transfer_error:
+ _FreeLMPageArray(psDstPageArrayData);
+alloc_error:
+ return eError;
+}
+
+/* Extracts all allocated pages referenced by psSrcPageArrayData.
+ * Allocates a new PMR_LMALLOCARRAY_DATA object and fills it with the
+ * extracted pages information.
+ */
+static PVRSRV_ERROR
+_ExtractAllPages(PMR_LMALLOCARRAY_DATA *psSrcPageArrayData,
+ PMR_LMALLOCARRAY_DATA **psOutPageArrayData)
+{
+ PVRSRV_ERROR eError;
+ PMR_LMALLOCARRAY_DATA* psDstPageArrayData;
+ IMG_UINT32 ui32IdxSrc, ui32IdxDst;
+
+ if (psSrcPageArrayData->iNumChunksAllocated == 0)
+ {
+ /* Do nothing if psSrcPageArrayData contains no allocated pages */
+ return PVRSRV_OK;
+ }
+
+ /* Alloc PMR_LMALLOCARRAY_DATA for the extracted pages */
+ eError = _AllocLMPageArray((IMG_UINT64)psSrcPageArrayData->iNumChunksAllocated << psSrcPageArrayData->uiLog2ChunkSize,
+ psSrcPageArrayData->iNumChunksAllocated,
+ psSrcPageArrayData->uiTotalNumChunks,
+ psSrcPageArrayData->uiLog2ChunkSize,
+ psSrcPageArrayData->ui32Flags,
+ psSrcPageArrayData->psPhysHeap,
+ psSrcPageArrayData->uiAllocFlags,
+ psSrcPageArrayData->uiPid,
+ &psDstPageArrayData,
+ psSrcPageArrayData->psConnection);
+ PVR_LOG_RETURN_IF_ERROR_VA(eError, "_AllocLMPageArray failed in %s", __func__);
+
+ /* Now do the transfer */
+ ui32IdxDst = 0;
+ for (ui32IdxSrc = 0; ((ui32IdxDst < psSrcPageArrayData->iNumChunksAllocated) &&
+ (psDstPageArrayData->iNumChunksAllocated < psSrcPageArrayData->iNumChunksAllocated)); ui32IdxSrc++)
+ {
+ if (psSrcPageArrayData->aBaseArray[ui32IdxSrc] != INVALID_BASE_ADDR)
+ {
+ psDstPageArrayData->aBaseArray[ui32IdxDst++] = psSrcPageArrayData->aBaseArray[ui32IdxSrc];
+ psSrcPageArrayData->aBaseArray[ui32IdxSrc] = INVALID_BASE_ADDR;
+ psDstPageArrayData->iNumChunksAllocated++;
+ }
+ }
+
+ /* Update src page count */
+ psSrcPageArrayData->iNumChunksAllocated = 0;
+
+ *psOutPageArrayData = psDstPageArrayData;
+
+ return PVRSRV_OK;
+}
+#endif /* defined(SUPPORT_PMR_PAGES_DEFERRED_FREE) */
+
static PVRSRV_ERROR
_FreeLMPages(PMR_LMALLOCARRAY_DATA *psPageArrayData,
IMG_UINT32 *pui32FreeIndices,
@@ -2088,21 +2217,53 @@ PMRLockSysPhysAddressesLocalMem(PMR_IMPL_PRIVDATA pvPriv)
return PVRSRV_OK;
}
+#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE)
+static PVRSRV_ERROR
+PMRUnlockSysPhysAddressesLocalMem(PMR_IMPL_PRIVDATA pvPriv,
+ PMR_IMPL_ZOMBIEPAGES *ppvZombiePages)
+#else
static PVRSRV_ERROR
PMRUnlockSysPhysAddressesLocalMem(PMR_IMPL_PRIVDATA pvPriv)
+#endif
{
PVRSRV_ERROR eError = PVRSRV_OK;
- PMR_LMALLOCARRAY_DATA *psLMAllocArrayData;
+ PMR_LMALLOCARRAY_DATA *psLMAllocArrayData = pvPriv;
+#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE)
+ PMR_LMALLOCARRAY_DATA *psExtractedPagesPageArray = NULL;
- psLMAllocArrayData = pvPriv;
+ *ppvZombiePages = NULL;
+#endif
if (BIT_ISSET(psLMAllocArrayData->ui32Flags, FLAG_ONDEMAND))
{
+#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE)
+ if (psLMAllocArrayData->iNumChunksAllocated == 0)
+ {
+ *ppvZombiePages = NULL;
+ return PVRSRV_OK;
+ }
+
+ eError = _ExtractAllPages(psLMAllocArrayData,
+ &psExtractedPagesPageArray);
+ PVR_LOG_GOTO_IF_ERROR(eError, "_ExtractAllPages", e0);
+
+ if (psExtractedPagesPageArray)
+ {
+ /* Zombify pages to get proper stats */
+ eError = PMRZombifyLocalMem(psExtractedPagesPageArray, NULL);
+ PVR_WARN_IF_ERROR(eError, "PMRZombifyLocalMem");
+ }
+ *ppvZombiePages = psExtractedPagesPageArray;
+#else
/* Free Memory for deferred allocation */
eError = _FreeLMPages(psLMAllocArrayData, NULL, 0);
PVR_RETURN_IF_ERROR(eError);
+#endif
}
+#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE)
+e0:
+#endif
PVR_ASSERT(eError == PVRSRV_OK);
return eError;
}
@@ -2481,6 +2642,9 @@ PMRChangeSparseMemLocalMem(PMR_IMPL_PRIVDATA pPriv,
IMG_UINT32 *pai32AllocIndices,
IMG_UINT32 ui32FreePageCount,
IMG_UINT32 *pai32FreeIndices,
+#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE)
+ PMR_IMPL_ZOMBIEPAGES *ppvZombiePages,
+#endif
IMG_UINT32 uiFlags)
{
PVRSRV_ERROR eError = PVRSRV_ERROR_INVALID_PARAMS;
@@ -2506,6 +2670,10 @@ PMRChangeSparseMemLocalMem(PMR_IMPL_PRIVDATA pPriv,
RA_BASE_T *paBaseArray = psPMRPageArrayData->aBaseArray;
PMR_MAPPING_TABLE *psPMRMapTable = PMR_GetMappingTable(psPMR);
+#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE)
+ *ppvZombiePages = NULL;
+#endif
+
/* The incoming request is classified into two operations independent of
* each other: alloc & free chunks.
* These operations can be combined with two mapping operations as well
@@ -2678,13 +2846,25 @@ PMRChangeSparseMemLocalMem(PMR_IMPL_PRIVDATA pPriv,
}
}
- /* Free the additional free chunks */
+ /* Free or zombie the additional free chunks */
if (0 != ui32AdtnlFreePages)
{
- ui32Index = ui32Loop;
+#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE)
+ PMR_LMALLOCARRAY_DATA *psExtractedPagesPageArray = NULL;
+
+ eError = _ExtractPages(psPMRPageArrayData, &pai32FreeIndices[ui32Loop], ui32AdtnlFreePages, &psExtractedPagesPageArray);
+ PVR_LOG_GOTO_IF_ERROR(eError, "_ExtractPages", e0);
+
+ /* Zombify pages to get proper stats */
+ eError = PMRZombifyLocalMem(psExtractedPagesPageArray, NULL);
+ PVR_LOG_IF_ERROR(eError, "PMRZombifyLocalMem");
+
+ *ppvZombiePages = psExtractedPagesPageArray;
+#else
eError = _FreeLMPages(psPMRPageArrayData, &pai32FreeIndices[ui32Loop], ui32AdtnlFreePages);
PVR_LOG_GOTO_IF_ERROR(eError, "_FreeLMPages", e0);
-
+#endif /* SUPPORT_PMR_PAGES_DEFERRED_FREE */
+ ui32Index = ui32Loop;
ui32Loop = 0;
while (ui32Loop++ < ui32AdtnlFreePages)
@@ -2796,6 +2976,9 @@ static PMR_IMPL_FUNCTAB _sPMRLMAFuncTab = {
.pfnChangeSparseMemCPUMap = &PMRChangeSparseMemCPUMapLocalMem,
.pfnMMap = NULL,
.pfnFinalize = &PMRFinalizeLocalMem,
+#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE)
+ .pfnFreeZombiePages = &PMRFreeZombiePagesRAMem,
+#endif
#if defined(SUPPORT_PMR_DEFERRED_FREE)
.pfnZombify = &PMRZombifyLocalMem,
#endif
diff --git a/drivers/gpu/img-rogue/23.2/services/server/common/pmr.c b/drivers/gpu/img-rogue/23.2/services/server/common/pmr.c
index c3cc242..3126fd1 100644
--- a/drivers/gpu/img-rogue/23.2/services/server/common/pmr.c
+++ b/drivers/gpu/img-rogue/23.2/services/server/common/pmr.c
@@ -147,6 +147,49 @@ static struct _PMR_CTX_
IMG_BOOL bModuleInitialised;
} _gsSingletonPMRContext = { 1, 0, {0}, NULL, IMG_FALSE };
+#if defined(SUPPORT_PMR_DEFERRED_FREE)
+typedef enum _PMR_ZOMBIE_TYPE_ {
+ PMR_ZOMBIE_TYPE_PMR,
+#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE)
+ PMR_ZOMBIE_TYPE_PAGES,
+#endif /* defined(SUPPORT_PMR_PAGES_DEFERRED_FREE) */
+#if defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE)
+ PMR_ZOMBIE_TYPE_DEVICE_IMPORT,
+#endif /* defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE) */
+} PMR_ZOMBIE_TYPE;
+
+typedef struct _PMR_HEADER_
+{
+ /* List node used to put the header on the zombie list
+ * (psDevNode->sPMRZombieList). */
+ DLLIST_NODE sZombieNode;
+
+ PMR_ZOMBIE_TYPE eZombieType;
+} PMR_HEADER;
+#endif /* defined(SUPPORT_PMR_DEFERRED_FREE) */
+
+#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE)
+/*
+ * A structure describing zombie pages.
+ */
+typedef struct _PMR_ZOMBIE_PAGES_
+{
+ PMR_HEADER sHeader;
+ PMR_IMPL_ZOMBIEPAGES pvFactoryPages;
+ PFN_FREE_ZOMBIE_PAGES_FN pfnFactoryFreeZombies;
+} PMR_ZOMBIE_PAGES;
+#endif
+
+#if defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE)
+typedef struct _PMR_DEVICE_IMPORT_
+{
+ PMR_HEADER sHeader; /* psDevNode zombie queue list node. */
+ DLLIST_NODE sNext; /* PMR::sXDeviceImports list node. */
+ PVRSRV_DEVICE_NODE *psDevNode; /* Device this import is representing. */
+ PMR *psParent; /* PMR the import belongs to. */
+} PMR_DEVICE_IMPORT;
+#endif
+
/* A PMR. One per physical allocation. May be "shared".
*
* "shared" is ambiguous. We need to be careful with terminology.
@@ -164,9 +207,8 @@ static struct _PMR_CTX_
struct _PMR_
{
#if defined(SUPPORT_PMR_DEFERRED_FREE)
- /* List node used to put the PMR on the zombie list
- * (psDevNode->sPMRZombieList). */
- DLLIST_NODE sZombieNode;
+ /* A common header shared between the PMR and PMR-like objects (e.g. PMR_ZOMBIE_PAGES) */
+ PMR_HEADER sHeader;
#endif /* defined(SUPPORT_PMR_DEFERRED_FREE) */
/* This object is strictly refcounted. References include:
@@ -193,6 +235,12 @@ struct _PMR_
*/
ATOMIC_T iCpuMapCount;
+ /* Count of how many reservations refer to this
+ * PMR as a part of a GPU mapping. Must be protected
+ * by PMR lock.
+ */
+ IMG_INT32 iAssociatedResCount;
+
/* Lock count - this is the number of times PMRLockSysPhysAddresses()
* has been called, less the number of PMRUnlockSysPhysAddresses()
* calls. This is arguably here for debug reasons only, as the refcount
@@ -205,6 +253,16 @@ struct _PMR_
/* Lock for this structure */
POS_LOCK hLock;
+ /* Protects: `uiInternalFlags` & `uiDevImportBitmap` */
+ POS_SPINLOCK hBitmapLock;
+
+#if defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE)
+ /* See PMR_ImportedDevicesMask()
+ * Protected by hBitmapLock. */
+ IMG_UINT64 uiDevImportBitmap;
+ /* List of PMR_DEVICE_IMPORT's */
+ DLLIST_NODE sXDeviceImports;
+#endif /* defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE) */
/* Incrementing serial number to each allocation. */
IMG_UINT64 uiSerialNum;
@@ -484,9 +542,18 @@ _PMRCreate(PMR_SIZE_T uiLogicalSize,
return eError;
}
+ eError = OSSpinLockCreate(&psPMR->hBitmapLock);
+ if (eError != PVRSRV_OK)
+ {
+ OSLockDestroy(psPMR->hLock);
+ OSFreeMem(psPMR);
+ return eError;
+ }
+
/* Setup the PMR */
OSAtomicWrite(&psPMR->iRefCount, 0);
OSAtomicWrite(&psPMR->iCpuMapCount, 0);
+ psPMR->iAssociatedResCount = 0;
/* If allocation is not made on demand, it will be backed now and
* backing will not be removed until the PMR is destroyed, therefore
@@ -502,8 +569,16 @@ _PMRCreate(PMR_SIZE_T uiLogicalSize,
psPMR->uiInternalFlags = bSparse ? PMR_FLAG_INTERNAL_SPARSE_ALLOC : 0;
psPMR->szAnnotation[0] = '\0';
+#if defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE)
+ psPMR->uiDevImportBitmap = 0;
+#endif /* defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE) */
+
#if defined(SUPPORT_PMR_DEFERRED_FREE)
- dllist_init(&psPMR->sZombieNode);
+ psPMR->sHeader.eZombieType = PMR_ZOMBIE_TYPE_PMR;
+ dllist_init(&psPMR->sHeader.sZombieNode);
+#if defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE)
+ dllist_init(&psPMR->sXDeviceImports);
+#endif /* defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE) */
#endif /* defined(SUPPORT_PMR_DEFERRED_FREE) */
#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
@@ -566,6 +641,36 @@ PMRUnlockPMR(PMR *psPMR)
OSLockRelease(psPMR->hLock); /* Uses same lock as PhysAddresses */
}
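+
+/* Accessors for psPMR->uiInternalFlags; the flags are protected by hBitmapLock. */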
+static INLINE void _IntFlagSet(PMR *psPMR, const IMG_UINT32 uiValue)
+{
+ OS_SPINLOCK_FLAGS uiLockingFlags;
+
+ OSSpinLockAcquire(psPMR->hBitmapLock, uiLockingFlags);
+ BITMASK_SET(psPMR->uiInternalFlags, uiValue);
+ OSSpinLockRelease(psPMR->hBitmapLock, uiLockingFlags);
+}
+
+static INLINE void _IntFlagClr(PMR *psPMR, const IMG_UINT32 uiValue)
+{
+ OS_SPINLOCK_FLAGS uiLockingFlags;
+
+ OSSpinLockAcquire(psPMR->hBitmapLock, uiLockingFlags);
+ BITMASK_UNSET(psPMR->uiInternalFlags, uiValue);
+ OSSpinLockRelease(psPMR->hBitmapLock, uiLockingFlags);
+}
+
+static INLINE IMG_BOOL _IntFlagIsSet(const PMR *psPMR, const IMG_UINT32 uiValue)
+{
+ OS_SPINLOCK_FLAGS uiLockingFlags;
+ IMG_BOOL bIsSet;
+
+ OSSpinLockAcquire(psPMR->hBitmapLock, uiLockingFlags);
+ bIsSet = BITMASK_HAS(psPMR->uiInternalFlags, uiValue);
+ OSSpinLockRelease(psPMR->hBitmapLock, uiLockingFlags);
+
+ return bIsSet;
+}
+
static INLINE void
_FactoryLock(const PMR_IMPL_FUNCTAB *psFuncTable)
{
@@ -584,6 +689,243 @@ _FactoryUnlock(const PMR_IMPL_FUNCTAB *psFuncTable)
}
}
+#if defined(SUPPORT_PMR_DEFERRED_FREE)
+/* Protects:
+ * - `psDevNode->sPMRZombieList`
+ * - `uiPMRZombieCount`
+ * - `uiPMRZombieCountInCleanup`
+ *
+ * and all `PMR_ZOMBIE_CLEANUP_ITEM::sZombieList` where
+ * `PMR_ZOMBIE_CLEANUP_ITEM::psDevNode == psDevNode` */
+static INLINE void
+_ZombieListLock(PPVRSRV_DEVICE_NODE psDevNode)
+{
+ OSLockAcquire(psDevNode->hPMRZombieListLock);
+}
+
+static INLINE void
+_ZombieListUnlock(PPVRSRV_DEVICE_NODE psDevNode)
+{
+ OSLockRelease(psDevNode->hPMRZombieListLock);
+}
+
+static IMG_BOOL _IsDeviceOnAndOperating(PVRSRV_DEVICE_NODE *psDevNode)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_DEV_POWER_STATE ePowerState;
+
+ eError = PVRSRVGetDevicePowerState(psDevNode, &ePowerState);
+ if (eError != PVRSRV_OK)
+ {
+ /* Treat unknown power state as ON. */
+ ePowerState = PVRSRV_DEV_POWER_STATE_ON;
+ }
+
+ /* The device does not accept zombies when its power is OFF as
+ * the cache invalidation comes as a given. */
+ return !( ePowerState == PVRSRV_DEV_POWER_STATE_OFF
+ || psDevNode->eDevState == PVRSRV_DEVICE_STATE_PCI_ERROR);
+}
+#endif /* defined(SUPPORT_PMR_DEFERRED_FREE) */
+
+#if defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE)
+static IMG_UINT64
+_DeviceImportBitmapGet(const PMR *psPMR)
+{
+ OS_SPINLOCK_FLAGS uiLockingFlags;
+ IMG_UINT64 uiDevImportBitmap;
+
+ OSSpinLockAcquire(psPMR->hBitmapLock, uiLockingFlags);
+ uiDevImportBitmap = psPMR->uiDevImportBitmap;
+ OSSpinLockRelease(psPMR->hBitmapLock, uiLockingFlags);
+
+ return uiDevImportBitmap;
+}
+
+static void
+_DeviceImportBitmapClr(PMR *psPMR, const PPVRSRV_DEVICE_NODE psDevNode)
+{
+ OS_SPINLOCK_FLAGS uiLockingFlags;
+
+ OSSpinLockAcquire(psPMR->hBitmapLock, uiLockingFlags);
+ BITMASK_UNSET(psPMR->uiDevImportBitmap, IMG_UINT64_C(1) << psDevNode->sDevId.ui32InternalID);
+ OSSpinLockRelease(psPMR->hBitmapLock, uiLockingFlags);
+}
+
+static IMG_BOOL
+_DeviceImportBitmapIsSet(const PMR *psPMR, const PPVRSRV_DEVICE_NODE psDevNode)
+{
+ OS_SPINLOCK_FLAGS uiLockingFlags;
+ IMG_BOOL bIsSet;
+
+ OSSpinLockAcquire(psPMR->hBitmapLock, uiLockingFlags);
+ bIsSet = BITMASK_HAS(psPMR->uiDevImportBitmap,
+ IMG_UINT64_C(1) << psDevNode->sDevId.ui32InternalID);
+ OSSpinLockRelease(psPMR->hBitmapLock, uiLockingFlags);
+
+ return bIsSet;
+}
+
+/* Atomically test whether `psDevNode` is already set in the bitmap, then set it. */
+static IMG_BOOL
+_DeviceImportBitmapFetchAndSet(PMR *psPMR, const PPVRSRV_DEVICE_NODE psDevNode)
+{
+ OS_SPINLOCK_FLAGS uiLockingFlags;
+ IMG_BOOL bIsSet;
+
+ OSSpinLockAcquire(psPMR->hBitmapLock, uiLockingFlags);
+ bIsSet = BITMASK_HAS(psPMR->uiDevImportBitmap,
+ IMG_UINT64_C(1) << psDevNode->sDevId.ui32InternalID);
+ BITMASK_SET(psPMR->uiDevImportBitmap,
+ IMG_UINT64_C(1) << psDevNode->sDevId.ui32InternalID);
+ OSSpinLockRelease(psPMR->hBitmapLock, uiLockingFlags);
+
+ return bIsSet;
+}
+
+#endif /* defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE) */
+
+
+#if defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE)
+static PVRSRV_ERROR
+_DeviceImportRegister(PMR *psPMR, PPVRSRV_DEVICE_NODE psDevNode)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PMR_DEVICE_IMPORT *psImport;
+
+ PVR_ASSERT(psPMR);
+ PVR_ASSERT(psDevNode);
+ PVR_ASSERT(PMR_DeviceNode(psPMR) != psDevNode);
+
+ /* Explicitly reject:
+ * - PVRSRV_MEMALLOCFLAG_DEFER_PHYS_ALLOC
+ * - !PMR_FLAG_INTERNAL_NO_LAYOUT_CHANGE
+ * as XD PMRs don't have support for
+ * SUPPORT_PMR_PAGES_DEFERRED_FREE. */
+ if (PVRSRV_CHECK_ON_DEMAND(psPMR->uiFlags) ||
+ !_IntFlagIsSet(psPMR, PMR_FLAG_INTERNAL_NO_LAYOUT_CHANGE))
+ {
+ eError = PVRSRV_ERROR_PMR_NOT_PERMITTED;
+ PVR_LOG_ERROR(eError,
+ "PVRSRV_CHECK_ON_DEMAND || !PMR_FLAG_INTERNAL_NO_LAYOUT_CHANGE");
+ return eError;
+ }
+
+ /* Check if the device is already imported */
+ if (_DeviceImportBitmapFetchAndSet(psPMR, psDevNode))
+ {
+ return PVRSRV_OK;
+ }
+
+ psImport = OSAllocMem(sizeof(*psImport));
+ PVR_LOG_RETURN_IF_NOMEM(psImport, "PMR_DEVICE_IMPORT");
+
+ psImport->psParent = psPMR;
+ psImport->psDevNode = psDevNode;
+ dllist_init(&psImport->sHeader.sZombieNode);
+ psImport->sHeader.eZombieType = PMR_ZOMBIE_TYPE_DEVICE_IMPORT;
+
+ PMRLockPMR(psPMR);
+ dllist_add_to_tail(&psPMR->sXDeviceImports, &psImport->sNext);
+ PMRUnlockPMR(psPMR);
+
+ return eError;
+}
+
+static void
+_DeviceImportFreeImportZombie(PMR_DEVICE_IMPORT *psImport)
+{
+ PVR_ASSERT(_DeviceImportBitmapIsSet(psImport->psParent, psImport->psDevNode));
+ _DeviceImportBitmapClr(psImport->psParent, psImport->psDevNode);
+
+ PMRLockPMR(psImport->psParent);
+ dllist_remove_node(&psImport->sNext);
+ PMRUnlockPMR(psImport->psParent);
+
+ OSFreeMem(psImport);
+}
+
+static IMG_BOOL
+_DeviceImportEnqueueZombie(PMR_DEVICE_IMPORT *psImport)
+{
+ PVR_ASSERT(_DeviceImportBitmapIsSet(psImport->psParent, psImport->psDevNode));
+
+ if (!_IsDeviceOnAndOperating(psImport->psDevNode))
+ {
+ _DeviceImportFreeImportZombie(psImport);
+ return IMG_FALSE;
+ }
+
+ _ZombieListLock(psImport->psDevNode);
+ dllist_add_to_tail(&psImport->psDevNode->sPMRZombieList,
+ &psImport->sHeader.sZombieNode);
+ psImport->psDevNode->uiPMRZombieCount++;
+ _ZombieListUnlock(psImport->psDevNode);
+
+ return IMG_TRUE;
+}
+
+static void
+_DeviceImportsReviveZombies(PMR *psPMR)
+{
+ PDLLIST_NODE psNode, psNext;
+ PMR_DEVICE_IMPORT *psImport;
+
+ dllist_foreach_node(&psPMR->sXDeviceImports, psNode, psNext)
+ {
+ psImport = IMG_CONTAINER_OF(psNode, PMR_DEVICE_IMPORT, sNext);
+ _ZombieListLock(psImport->psDevNode);
+ if (!dllist_is_empty(&psImport->sHeader.sZombieNode))
+ {
+ dllist_remove_node(&psImport->sHeader.sZombieNode);
+ psImport->psDevNode->uiPMRZombieCount--;
+ }
+ _ZombieListUnlock(psImport->psDevNode);
+ }
+}
+
+static IMG_BOOL
+_DeviceImportsEnqueueZombies(PMR *psPMR)
+{
+ PDLLIST_NODE psNode, psNext;
+ PMR_DEVICE_IMPORT *psImport;
+ IMG_BOOL bEnqueued = IMG_FALSE;
+
+ PMRLockPMR(psPMR);
+
+ dllist_foreach_node(&psPMR->sXDeviceImports, psNode, psNext)
+ {
+ psImport = IMG_CONTAINER_OF(psNode, PMR_DEVICE_IMPORT, sNext);
+ bEnqueued |= _DeviceImportEnqueueZombie(psImport);
+ }
+
+ PMRUnlockPMR(psPMR);
+
+ return bEnqueued;
+}
+
+static void
+_DeviceImportsUnregisterAll(PMR *psPMR)
+{
+ OS_SPINLOCK_FLAGS uiLockingFlags;
+ PDLLIST_NODE psNode, psNext;
+
+ PMRLockPMR(psPMR);
+ dllist_foreach_node(&psPMR->sXDeviceImports, psNode, psNext)
+ {
+ PMR_DEVICE_IMPORT *psImport = IMG_CONTAINER_OF(psNode, PMR_DEVICE_IMPORT, sNext);
+ PVR_ASSERT(_DeviceImportBitmapIsSet(psPMR, psImport->psDevNode));
+ OSFreeMem(psImport);
+ }
+ dllist_init(&psPMR->sXDeviceImports);
+
+ OSSpinLockAcquire(psPMR->hBitmapLock, uiLockingFlags);
+ psPMR->uiDevImportBitmap = 0;
+ OSSpinLockRelease(psPMR->hBitmapLock, uiLockingFlags);
+ PMRUnlockPMR(psPMR);
+}
+#endif /* defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE) */
+
static void
_PMRDestroy(PMR *psPMR)
{
@@ -630,6 +972,10 @@ _PMRDestroy(PMR *psPMR)
}
#endif /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */
+#if defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE)
+ _DeviceImportsUnregisterAll(psPMR);
+#endif /* defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE) */
+
/* Decrement live PMR count. Probably only of interest for debugging */
PVR_ASSERT(OSAtomicRead(&psPMR->psContext->uiNumLivePMRs) > 0);
OSAtomicDecrement(&psPMR->psContext->uiNumLivePMRs);
@@ -637,19 +983,57 @@ _PMRDestroy(PMR *psPMR)
PVR_DPF((PVR_DBG_MESSAGE, "%s: 0x%p, key:0x%016" IMG_UINT64_FMTSPECX ", numLive:%d",
__func__, psPMR, psPMR->uiKey, OSAtomicRead(&psPMR->psContext->uiNumLivePMRs)));
+ OSSpinLockDestroy(psPMR->hBitmapLock);
OSLockDestroy(psPMR->hLock);
OSFreeMem(psPMR);
}
+#if defined(SUPPORT_PMR_DEFERRED_FREE)
+static INLINE PMR_ZOMBIE_TYPE
+PMR_GetZombieTypeFromNode(const DLLIST_NODE *psNode)
+{
+ PMR_HEADER *psPMRHeader = IMG_CONTAINER_OF(psNode, PMR_HEADER, sZombieNode);
+ PVR_ASSERT(psPMRHeader != NULL);
+ return psPMRHeader->eZombieType;
+}
+
+#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE)
+static INLINE PMR_ZOMBIE_PAGES*
+PMR_GetZombiePagesFromNode(const DLLIST_NODE *psNode)
+{
+ PMR_HEADER *psPMRHeader = IMG_CONTAINER_OF(psNode, PMR_HEADER, sZombieNode);
+ PVR_ASSERT(psPMRHeader != NULL);
+ return IMG_CONTAINER_OF(psPMRHeader, PMR_ZOMBIE_PAGES, sHeader);
+}
+#endif /* defined(SUPPORT_PMR_PAGES_DEFERRED_FREE) */
+
+#if defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE)
+static INLINE PMR_DEVICE_IMPORT*
+PMR_GetDeviceImportFromNode(const DLLIST_NODE *psNode)
+{
+ PMR_HEADER *psPMRHeader = IMG_CONTAINER_OF(psNode, PMR_HEADER, sZombieNode);
+ PVR_ASSERT(psPMRHeader != NULL);
+ return IMG_CONTAINER_OF(psPMRHeader, PMR_DEVICE_IMPORT, sHeader);
+}
+#endif /* defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE) */
+
+static INLINE PMR*
+PMR_GetPMRFromNode(const DLLIST_NODE *psNode)
+{
+ PMR_HEADER *psPMRHeader = IMG_CONTAINER_OF(psNode, PMR_HEADER, sZombieNode);
+ PVR_ASSERT(psPMRHeader != NULL);
+ return IMG_CONTAINER_OF(psPMRHeader, PMR, sHeader);
+}
+#endif /* defined(SUPPORT_PMR_DEFERRED_FREE) */
+
static void
_UnrefAndMaybeDestroy(PMR *psPMR)
{
const PMR_IMPL_FUNCTAB *psFuncTable;
IMG_INT iRefCount;
#if defined(SUPPORT_PMR_DEFERRED_FREE)
- PVRSRV_DEV_POWER_STATE ePowerState;
PVRSRV_DEVICE_NODE *psDevNode;
- PVRSRV_ERROR eError;
+ IMG_BOOL bQueuedDeviceImports = IMG_FALSE;
#endif
PVR_ASSERT(psPMR != NULL);
@@ -672,32 +1056,34 @@ _UnrefAndMaybeDestroy(PMR *psPMR)
#else /* !defined(SUPPORT_PMR_DEFERRED_FREE) */
psDevNode = PhysHeapDeviceNode(psPMR->psPhysHeap);
- eError = PVRSRVGetDevicePowerState(psDevNode, &ePowerState);
- if (eError != PVRSRV_OK)
+ /* PMRs that are not marked for deferred free can be freed right away.
+ * Those are the PMRs that have not been mapped to the device.
+ * All PMRs that have been mapped to the device need to go through
+ * the defer free path unless the power is OFF for the PMR's device
+ * and for all of the device imports. If power is OFF
+ * the cache invalidation comes as a given. */
+ if (!_IntFlagIsSet(psPMR, PMR_FLAG_INTERNAL_DEFER_FREE))
{
- /* Treat unknown power state as ON. */
- ePowerState = PVRSRV_DEV_POWER_STATE_ON;
+ _PMRDestroy(psPMR);
+ goto exit_;
}
- /* PMRs that are not marked for deferred free can be freed right away.
- * Those are the PMRs that are not device mappable (so only CPU
- * readable/writeable).
- * All PMRs that are device mappable need to go through the defer free
- * path unless the power is OFF. If power is OFF the cache invalidation
- * comes as a given. */
- if (!BITMASK_HAS(psPMR->uiInternalFlags, PMR_FLAG_INTERNAL_DEFER_FREE) ||
- ePowerState == PVRSRV_DEV_POWER_STATE_OFF ||
- psDevNode->eDevState == PVRSRV_DEVICE_STATE_PCI_ERROR)
+#if defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE)
+ bQueuedDeviceImports = _DeviceImportsEnqueueZombies(psPMR);
+#endif /* defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE) */
+
+ if (!bQueuedDeviceImports && !_IsDeviceOnAndOperating(psDevNode))
{
_PMRDestroy(psPMR);
}
else
{
/* Defer freeing the PMR until the Firmware invalidates the caches. */
- OSLockAcquire(psDevNode->hPMRZombieListLock);
+ _ZombieListLock(psDevNode);
- BITMASK_SET(psPMR->uiInternalFlags, PMR_FLAG_INTERNAL_IS_ZOMBIE);
- dllist_add_to_tail(&psDevNode->sPMRZombieList, &psPMR->sZombieNode);
+ _IntFlagSet(psPMR, PMR_FLAG_INTERNAL_IS_ZOMBIE);
+
+ dllist_add_to_tail(&psDevNode->sPMRZombieList, &psPMR->sHeader.sZombieNode);
psDevNode->uiPMRZombieCount++;
/* PMR pages are accounted by the driver/process stats. Those stats
@@ -712,8 +1098,9 @@ _UnrefAndMaybeDestroy(PMR *psPMR)
PVR_LOG_IF_ERROR(eError, "pfnZombify");
}
- OSLockRelease(psDevNode->hPMRZombieListLock);
+ _ZombieListUnlock(psDevNode);
}
+exit_:
#endif /* !defined(SUPPORT_PMR_DEFERRED_FREE) */
_FactoryUnlock(psFuncTable);
@@ -723,25 +1110,13 @@ _UnrefAndMaybeDestroy(PMR *psPMR)
typedef struct _PMR_ZOMBIE_CLEANUP_ITEM_
{
PVRSRV_CLEANUP_THREAD_WORK sCleanupThreadFn;
- DLLIST_NODE sZombieList;
+ DLLIST_NODE sZombieList; /*!< See _ZombieListLock */
PPVRSRV_DEVICE_NODE psDevNode;
PVRSRV_CLIENT_SYNC_PRIM *psSync;
IMG_UINT32 uiRequiredSyncValue;
IMG_UINT32 uiRequiredPowerOffCounter;
} PMR_ZOMBIE_CLEANUP_ITEM;
-static INLINE void
-_ZombieListLock(PMR_ZOMBIE_CLEANUP_ITEM *psCleanupItem)
-{
- OSLockAcquire(psCleanupItem->psDevNode->hPMRZombieListLock);
-}
-
-static INLINE void
-_ZombieListUnlock(PMR_ZOMBIE_CLEANUP_ITEM *psCleanupItem)
-{
- OSLockRelease(psCleanupItem->psDevNode->hPMRZombieListLock);
-}
-
static INLINE IMG_BOOL
_CanNotFreeZombies(const PMR_ZOMBIE_CLEANUP_ITEM *psCleanupItem)
{
@@ -760,17 +1135,18 @@ static PVRSRV_ERROR _PmrZombieCleanup(void *pvData)
{
PMR_ZOMBIE_CLEANUP_ITEM *psCleanupItem = pvData;
DLLIST_NODE *psNode;
+ DLLIST_NODE sRetryHead;
+ IMG_UINT32 uiRetryCount = 0;
+ PVRSRV_ERROR eError = PVRSRV_OK;
if (_CanNotFreeZombies(psCleanupItem))
{
return PVRSRV_ERROR_RETRY;
}
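+ /* Zombie entries that cannot be freed in this pass are parked here and re-queued for retry. */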
+ dllist_init(&sRetryHead);
do
{
- PMR *psPMR;
- const PMR_IMPL_FUNCTAB *psFuncTable;
-
/* hPMRZombieListLock will prevent removing a node while the list is
* processed. If the lock is already acquired by other process which
* intends to remove an item from the list it'll assure the list
@@ -778,43 +1154,125 @@ static PVRSRV_ERROR _PmrZombieCleanup(void *pvData)
* If this thread acquires the lock first it's possible that another
* thread might be holding PMR factory lock. */
- _ZombieListLock(psCleanupItem);
+ _ZombieListLock(psCleanupItem->psDevNode);
psNode = dllist_get_next_node(&psCleanupItem->sZombieList);
- _ZombieListUnlock(psCleanupItem);
+ _ZombieListUnlock(psCleanupItem->psDevNode);
- if (psNode != NULL)
+ if (psNode == NULL)
{
- psPMR = IMG_CONTAINER_OF(psNode, PMR, sZombieNode);
- psFuncTable = psPMR->psFuncTab;
+ continue;
+ }
- _FactoryLock(psFuncTable);
- _ZombieListLock(psCleanupItem);
-
- /* It is possible that the element might have been removed so
- * we have to check if the PMR is still a zombie. */
-
- if (PMR_IsZombie(psPMR))
+ switch (PMR_GetZombieTypeFromNode(psNode))
+ {
+#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE)
+ case PMR_ZOMBIE_TYPE_PAGES:
{
+ PMR_ZOMBIE_PAGES* psZombiePages = PMR_GetZombiePagesFromNode(psNode);
+ eError = psZombiePages->pfnFactoryFreeZombies(psZombiePages->pvFactoryPages);
+ _ZombieListLock(psCleanupItem->psDevNode);
dllist_remove_node(psNode);
psCleanupItem->psDevNode->uiPMRZombieCountInCleanup--;
- /* Unlock here to avoid locking dependency with the power lock.
- * It's okay to do it here since the factory lock is the one
- * that needs to be held during PMR destruction. */
- _ZombieListUnlock(psCleanupItem);
+ _ZombieListUnlock(psCleanupItem->psDevNode);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Cannot free zombie pages! Skipping object %p", psZombiePages));
+ dllist_add_to_tail(&sRetryHead, psNode);
+ uiRetryCount++;
+ }
+ else
+ {
+ OSFreeMem(psZombiePages);
+ }
+ break;
+ }
+#endif
- _PMRDestroy(psPMR);
- }
- else
+#if defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE)
+ case PMR_ZOMBIE_TYPE_DEVICE_IMPORT:
{
- _ZombieListUnlock(psCleanupItem);
+ PMR_DEVICE_IMPORT *psImport = PMR_GetDeviceImportFromNode(psNode);
+ _ZombieListLock(psCleanupItem->psDevNode);
+ dllist_remove_node(psNode);
+ psCleanupItem->psDevNode->uiPMRZombieCountInCleanup--;
+ _ZombieListUnlock(psCleanupItem->psDevNode);
+ _DeviceImportFreeImportZombie(psImport);
+ break;
}
- _FactoryUnlock(psFuncTable);
+#endif /* defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE) */
+
+ case PMR_ZOMBIE_TYPE_PMR:
+ {
+ PMR* psPMR = PMR_GetPMRFromNode(psNode);
+ const PMR_IMPL_FUNCTAB *psFuncTable = psPMR->psFuncTab;
+
+ _FactoryLock(psFuncTable);
+ _ZombieListLock(psCleanupItem->psDevNode);
+ /* It is possible that the element might have been removed so
+ * we have to check if the PMR is still a zombie.
+ * It's also possible that the PMR has been revived
+ * (PMRReviveZombieAndRef()), mapped, unmapped and zombified
+ * again while the lock was not held.
+ * Considering the above, only free the PMR immediately if it
+ * is still part of this cleanup item. */
+ if (psNode == dllist_get_next_node(&psCleanupItem->sZombieList))
+ {
+ dllist_remove_node(psNode);
+ psCleanupItem->psDevNode->uiPMRZombieCountInCleanup--;
+
+#if defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE)
+ /* The PMR cannot be freed if other devices are
+ * still waiting for the cache flush. */
+ if (_DeviceImportBitmapGet(psPMR) != 0)
+ {
+ /* Request it to be retried and continue
+ * to the next zombie item. */
+ dllist_add_to_tail(&sRetryHead, psNode);
+ uiRetryCount++;
+ _ZombieListUnlock(psCleanupItem->psDevNode);
+ _FactoryUnlock(psFuncTable);
+ continue;
+ }
+#endif /* defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE) */
+
+ /* Unlock here to avoid locking dependency with the power lock.
+ * It's okay to do it here since the factory lock is the one
+ * that needs to be held during PMR destruction. */
+ _ZombieListUnlock(psCleanupItem->psDevNode);
+ _PMRDestroy(psPMR);
+ }
+ else
+ {
+ _ZombieListUnlock(psCleanupItem->psDevNode);
+ }
+ _FactoryUnlock(psFuncTable);
+ break;
+ }
+
+ default:
+ PVR_ASSERT(!"Unknown Zombie Type!");
+ break;
}
} while (psNode != NULL);
- OSFreeMem(psCleanupItem);
+ if (uiRetryCount)
+ {
+ eError = PVRSRV_ERROR_RETRY;
+ _ZombieListLock(psCleanupItem->psDevNode);
+ /* Add the retry items back to this cleanup item for when the
+ * cleanup item is retried. Oldest items will reside at the head of
+ * the list. The cleanup item will be placed at the back of the cleanup
+ * queue to process other dependencies first. */
+ dllist_insert_list_at_head(&psCleanupItem->sZombieList, &sRetryHead);
+ psCleanupItem->psDevNode->uiPMRZombieCountInCleanup += uiRetryCount;
+ _ZombieListUnlock(psCleanupItem->psDevNode);
+ }
+ else
+ {
+ OSFreeMem(psCleanupItem);
+ }
- return PVRSRV_OK;
+ return eError;
}
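The switch above dispatches on a type tag read from each zombie-list node. The accessor and header definitions are not part of this hunk; below is a minimal sketch of the shape they imply, inferred from the sHeader uses elsewhere in this patch (the PMR_ZOMBIE_HEADER layout and enum ordering are assumptions, not quoted from the source):

typedef enum _PMR_ZOMBIE_TYPE_
{
    PMR_ZOMBIE_TYPE_PMR,           /* whole PMR awaiting destruction */
    PMR_ZOMBIE_TYPE_PAGES,         /* pages detached by PMR_ChangeSparseMem */
    PMR_ZOMBIE_TYPE_DEVICE_IMPORT, /* cross-device import awaiting flush */
} PMR_ZOMBIE_TYPE;

typedef struct _PMR_ZOMBIE_HEADER_
{
    DLLIST_NODE     sZombieNode; /* linked into sPMRZombieList or a cleanup item */
    PMR_ZOMBIE_TYPE eZombieType; /* tag consumed by the switch above */
} PMR_ZOMBIE_HEADER;

static INLINE PMR_ZOMBIE_TYPE PMR_GetZombieTypeFromNode(const DLLIST_NODE *psNode)
{
    /* Recover the enclosing header from its embedded list node. */
    return IMG_CONTAINER_OF(psNode, PMR_ZOMBIE_HEADER, sZombieNode)->eZombieType;
}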
IMG_BOOL PMRQueueZombiesForCleanup(PPVRSRV_DEVICE_NODE psDevNode)
@@ -829,18 +1287,18 @@ IMG_BOOL PMRQueueZombiesForCleanup(PPVRSRV_DEVICE_NODE psDevNode)
return IMG_FALSE;
}
- OSLockAcquire(psDevNode->hPMRZombieListLock);
+ _ZombieListLock(psDevNode);
if (dllist_is_empty(&psDevNode->sPMRZombieList))
{
- OSLockRelease(psDevNode->hPMRZombieListLock);
+ _ZombieListUnlock(psDevNode);
return IMG_FALSE;
}
psCleanupItem = OSAllocMem(sizeof(*psCleanupItem));
if (psCleanupItem == NULL)
{
- OSLockRelease(psDevNode->hPMRZombieListLock);
+ _ZombieListUnlock(psDevNode);
return IMG_FALSE;
}
@@ -869,10 +1327,18 @@ IMG_BOOL PMRQueueZombiesForCleanup(PPVRSRV_DEVICE_NODE psDevNode)
}
void
-PMRDequeueZombieAndRef(PMR *psPMR)
+PMRReviveZombieAndRef(PMR *psPMR)
{
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+ DLLIST_NODE *psThis, *psNext;
+ IMG_BOOL bIsOnZombieList = IMG_FALSE;
+
+ PVR_ASSERT(psPMR != NULL);
+
+ psDeviceNode = PhysHeapDeviceNode(psPMR->psPhysHeap);
+
/* If this was on a list then it's brought back to life. */
- OSLockAcquire(PhysHeapDeviceNode(psPMR->psPhysHeap)->hPMRZombieListLock);
+ _ZombieListLock(psDeviceNode);
/* Need to reference this PMR since it was about to be destroyed and its
* reference count must be 0 (can't use _Ref() due to the warning). */
@@ -885,15 +1351,41 @@ PMRDequeueZombieAndRef(PMR *psPMR)
/* If we got to this point the PMR must be on a list. If it's not
* it should mean a race of some sort. */
- PVR_ASSERT(!dllist_is_empty(&psPMR->sZombieNode));
+ PVR_ASSERT(!dllist_is_empty(&psPMR->sHeader.sZombieNode));
+
+ /* For the sake of correct accounting, check if the PMR is on the zombie
+ * list or in a cleanup item. */
+ dllist_foreach_node(&psDeviceNode->sPMRZombieList, psThis, psNext)
+ {
+ if (psThis == &psPMR->sHeader.sZombieNode)
+ {
+ bIsOnZombieList = IMG_TRUE;
+ break;
+ }
+ }
+#if defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE)
+ if (_DeviceImportBitmapGet(psPMR) != 0)
+ {
+ PMRLockPMR(psPMR);
+ _DeviceImportsReviveZombies(psPMR);
+ PMRUnlockPMR(psPMR);
+ }
+#endif /* defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE) */
/* Revive the PMR (remove it from the zombie list) and therefore
* prevent its destruction. */
- dllist_remove_node(&psPMR->sZombieNode);
- BITMASK_UNSET(psPMR->uiInternalFlags, PMR_FLAG_INTERNAL_IS_ZOMBIE);
- PhysHeapDeviceNode(psPMR->psPhysHeap)->uiPMRZombieCountInCleanup--;
+ dllist_remove_node(&psPMR->sHeader.sZombieNode);
+ _IntFlagClr(psPMR, PMR_FLAG_INTERNAL_IS_ZOMBIE);
- OSLockRelease(PhysHeapDeviceNode(psPMR->psPhysHeap)->hPMRZombieListLock);
+ if (bIsOnZombieList)
+ {
+ psDeviceNode->uiPMRZombieCount--;
+ }
+ else
+ {
+ psDeviceNode->uiPMRZombieCountInCleanup--;
+ }
+
+ _ZombieListUnlock(psDeviceNode);
}
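A compact statement of the accounting the function above preserves; this is a sketch of the invariant inferred from the two decrement paths, not quoted from the source:

/* Invariant held under the zombie-list lock:
 *   uiPMRZombieCount          == zombies still on psDevNode->sPMRZombieList
 *   uiPMRZombieCountInCleanup == zombies owned by already-queued cleanup items
 * PMRReviveZombieAndRef() scans sPMRZombieList to decide which of the two
 * counters to decrement when it unlinks the revived PMR.
 */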
void
@@ -901,13 +1393,21 @@ PMRMarkForDeferFree(PMR *psPMR)
{
PVR_ASSERT(psPMR != NULL);
- BITMASK_SET(psPMR->uiInternalFlags, PMR_FLAG_INTERNAL_DEFER_FREE);
+ if (PVRSRV_CHECK_ON_DEMAND(psPMR->uiFlags))
+ {
+ /* If PMR pages are allocated on demand, the freeing is handled
+ * by `SUPPORT_PMR_PAGES_DEFERRED_FREE` path in
+ * `PMRUnlockSysPhysAddressesNested()`. */
+ return;
+ }
+
+ _IntFlagSet(psPMR, PMR_FLAG_INTERNAL_DEFER_FREE);
}
#endif /* defined(SUPPORT_PMR_DEFERRED_FREE) */
static INLINE IMG_BOOL _PMRIsSparse(const PMR *psPMR)
{
- return BITMASK_HAS(psPMR->uiInternalFlags, PMR_FLAG_INTERNAL_SPARSE_ALLOC);
+ return _IntFlagIsSet(psPMR, PMR_FLAG_INTERNAL_SPARSE_ALLOC);
}
PVRSRV_ERROR
@@ -1058,9 +1558,19 @@ PVRSRV_ERROR
PMRUnlockSysPhysAddressesNested(PMR *psPMR, IMG_UINT32 ui32NestingLevel)
{
PVRSRV_ERROR eError;
+#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE)
+ PMR_IMPL_ZOMBIEPAGES pvZombiePages = NULL;
+ PMR_ZOMBIE_PAGES* psPMRZombiePages = NULL;
+#endif
PVR_ASSERT(psPMR != NULL);
+#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE)
+ /* Speculatively preallocate in order to simplify error handling later */
+ psPMRZombiePages = OSAllocZMem(sizeof(PMR_ZOMBIE_PAGES));
+ PVR_GOTO_IF_NOMEM(psPMRZombiePages, eError, e0);
+#endif
+
/* Acquiring the lock here, as well as during the Lock operation ensures
* the lock count hitting zero and the unlocking of the phys addresses is
* an atomic operation
@@ -1074,7 +1584,13 @@ PMRUnlockSysPhysAddressesNested(PMR *psPMR, IMG_UINT32 ui32NestingLevel)
{
PVR_ASSERT(psPMR->psFuncTab->pfnLockPhysAddresses != NULL);
+#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE)
+ eError = psPMR->psFuncTab->pfnUnlockPhysAddresses(psPMR->pvFlavourData,
+ &pvZombiePages);
+#else
eError = psPMR->psFuncTab->pfnUnlockPhysAddresses(psPMR->pvFlavourData);
+#endif
+
/* must never fail */
PVR_ASSERT(eError == PVRSRV_OK);
}
@@ -1082,12 +1598,59 @@ PMRUnlockSysPhysAddressesNested(PMR *psPMR, IMG_UINT32 ui32NestingLevel)
OSLockRelease(psPMR->hLock);
+#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE)
+ if (pvZombiePages != NULL)
+ {
+ PVRSRV_DEV_POWER_STATE ePowerState;
+ PVRSRV_DEVICE_NODE *psDevNode;
+
+ psDevNode = PhysHeapDeviceNode(psPMR->psPhysHeap);
+ eError = PVRSRVGetDevicePowerState(psDevNode, &ePowerState);
+ if (eError != PVRSRV_OK)
+ {
+ /* Treat unknown power state as ON. */
+ ePowerState = PVRSRV_DEV_POWER_STATE_ON;
+ }
+
+ if (ePowerState == PVRSRV_DEV_POWER_STATE_OFF ||
+ psDevNode->eDevState == PVRSRV_DEVICE_STATE_PCI_ERROR)
+ {
+ /* Free preallocated psPMRZombiePages as these won't be used */
+ OSFreeMem(psPMRZombiePages);
+
+ eError = psPMR->psFuncTab->pfnFreeZombiePages(pvZombiePages);
+ PVR_LOG_GOTO_IF_ERROR(eError, "Error when trying to free zombies immediately.", e0);
+ }
+ else
+ {
+ PVR_ASSERT(psPMRZombiePages != NULL);
+ psPMRZombiePages->sHeader.eZombieType = PMR_ZOMBIE_TYPE_PAGES;
+ psPMRZombiePages->pfnFactoryFreeZombies = psPMR->psFuncTab->pfnFreeZombiePages;
+ psPMRZombiePages->pvFactoryPages = pvZombiePages;
+
+ _ZombieListLock(psDevNode);
+ dllist_add_to_tail(&psDevNode->sPMRZombieList, &psPMRZombiePages->sHeader.sZombieNode);
+ psDevNode->uiPMRZombieCount++;
+ _ZombieListUnlock(psDevNode);
+ }
+ }
+ else
+ {
+ OSFreeMem(psPMRZombiePages);
+ }
+#endif
+
/* We also count the locks as references, so that the PMR is not
* freed while someone is using a physical address.
*/
_UnrefAndMaybeDestroy(psPMR);
return PVRSRV_OK;
+
+#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE)
+e0:
+#endif
+ return eError;
}
PVRSRV_ERROR
@@ -1960,6 +2523,47 @@ PMR_IsCpuMapped(PMR *psPMR)
return (OSAtomicRead(&psPMR->iCpuMapCount) > 0);
}
+void
+PMRGpuResCountIncr(PMR *psPMR)
+{
+ if (psPMR->iAssociatedResCount == PMR_MAPCOUNT_MAX)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: iAssociatedResCount for PMR: @0x%p (%s) would overflow.",
+ __func__,
+ psPMR,
+ psPMR->szAnnotation));
+ OSWarnOn(1);
+ return;
+ }
+
+ psPMR->iAssociatedResCount++;
+}
+
+void
+PMRGpuResCountDecr(PMR *psPMR)
+{
+ if (psPMR->iAssociatedResCount == PMR_MAPCOUNT_MIN)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: iAssociatedResCount (now %d) for PMR: @0x%p (%s) would underflow.",
+ __func__,
+ psPMR->iAssociatedResCount,
+ psPMR,
+ psPMR->szAnnotation));
+ OSWarnOn(1);
+ return;
+ }
+
+ psPMR->iAssociatedResCount--;
+}
+
+IMG_BOOL
+PMR_IsGpuMultiMapped(PMR *psPMR)
+{
+ PVR_ASSERT(psPMR != NULL);
+
+ return psPMR->iAssociatedResCount > 1;
+}
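For context, a hedged sketch of how a mapping path might pair these helpers; the call site below is hypothetical, and only the PMR-lock requirement comes from the declarations added to pmr.h in this patch:

/* Associate a GPU reservation with the PMR (hypothetical call site). */
PMRLockPMR(psPMR);
PMRGpuResCountIncr(psPMR);
PMRUnlockPMR(psPMR);

/* ... later, on teardown of that reservation ... */
PMRLockPMR(psPMR);
if (!PMR_IsGpuMultiMapped(psPMR))
{
    /* Last remaining reservation: single-mapping-only work is safe here. */
}
PMRGpuResCountDecr(psPMR);
PMRUnlockPMR(psPMR);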
+
PVRSRV_DEVICE_NODE *
PMR_DeviceNode(const PMR *psPMR)
{
@@ -1990,7 +2594,7 @@ PMR_IsZombie(const PMR *psPMR)
{
PVR_ASSERT(psPMR != NULL);
- return BITMASK_HAS(psPMR->uiInternalFlags, PMR_FLAG_INTERNAL_IS_ZOMBIE);
+ return _IntFlagIsSet(psPMR, PMR_FLAG_INTERNAL_IS_ZOMBIE);
}
#endif /* defined(SUPPORT_PMR_DEFERRED_FREE) */
@@ -2005,19 +2609,20 @@ PMR_SetLayoutFixed(PMR *psPMR, IMG_BOOL bFlag)
if (bFlag)
{
- BITMASK_SET(psPMR->uiInternalFlags, PMR_FLAG_INTERNAL_NO_LAYOUT_CHANGE);
+ _IntFlagSet(psPMR, PMR_FLAG_INTERNAL_NO_LAYOUT_CHANGE);
}
else
{
- BITMASK_UNSET(psPMR->uiInternalFlags, PMR_FLAG_INTERNAL_NO_LAYOUT_CHANGE);
+ _IntFlagClr(psPMR, PMR_FLAG_INTERNAL_NO_LAYOUT_CHANGE);
}
}
IMG_BOOL PMR_IsMemLayoutFixed(PMR *psPMR)
{
PVR_ASSERT(psPMR != NULL);
- return BITMASK_HAS(psPMR->uiInternalFlags, PMR_FLAG_INTERNAL_NO_LAYOUT_CHANGE);
+ return _IntFlagIsSet(psPMR, PMR_FLAG_INTERNAL_NO_LAYOUT_CHANGE);
}
void
@@ -2445,9 +3050,33 @@ PVRSRV_ERROR PMR_ChangeSparseMem(PMR *psPMR,
IMG_UINT32 uiSparseFlags)
{
PVRSRV_ERROR eError;
-
PMRLockPMR(psPMR);
+ eError = PMR_ChangeSparseMemUnlocked(psPMR,
+ ui32AllocPageCount,
+ pai32AllocIndices,
+ ui32FreePageCount,
+ pai32FreeIndices,
+ uiSparseFlags);
+
+ PMRUnlockPMR(psPMR);
+
+ return eError;
+}
+
+PVRSRV_ERROR PMR_ChangeSparseMemUnlocked(PMR *psPMR,
+ IMG_UINT32 ui32AllocPageCount,
+ IMG_UINT32 *pai32AllocIndices,
+ IMG_UINT32 ui32FreePageCount,
+ IMG_UINT32 *pai32FreeIndices,
+ IMG_UINT32 uiSparseFlags)
+{
+ PVRSRV_ERROR eError;
+#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE)
+ PMR_IMPL_ZOMBIEPAGES pvZombiePages = NULL;
+ PMR_ZOMBIE_PAGES* psPMRZombiePages = NULL;
+#endif
+
if (PMR_IsMemLayoutFixed(psPMR) || PMR_IsCpuMapped(psPMR))
{
PVR_DPF((PVR_DBG_ERROR,
@@ -2455,7 +3084,6 @@ PVRSRV_ERROR PMR_ChangeSparseMem(PMR *psPMR,
__func__,
PMR_IsMemLayoutFixed(psPMR) ? 'Y' : 'n',
PMR_IsCpuMapped(psPMR) ? 'Y' : 'n'));
- PMRUnlockPMR(psPMR);
return PVRSRV_ERROR_PMR_NOT_PERMITTED;
}
@@ -2464,16 +3092,27 @@ PVRSRV_ERROR PMR_ChangeSparseMem(PMR *psPMR,
PVR_DPF((PVR_DBG_ERROR,
"%s: This type of sparse PMR cannot be changed.",
__func__));
- PMRUnlockPMR(psPMR);
return PVRSRV_ERROR_NOT_IMPLEMENTED;
}
+#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE)
+ if (uiSparseFlags & SPARSE_RESIZE_FREE)
+ {
+ /* Speculatively preallocate in order to simplify error handling later */
+ psPMRZombiePages = OSAllocZMem(sizeof(PMR_ZOMBIE_PAGES));
+ PVR_GOTO_IF_NOMEM(psPMRZombiePages, eError, e0);
+ }
+#endif
+
eError = psPMR->psFuncTab->pfnChangeSparseMem(psPMR->pvFlavourData,
psPMR,
ui32AllocPageCount,
pai32AllocIndices,
ui32FreePageCount,
pai32FreeIndices,
+#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE)
+ &pvZombiePages,
+#endif
uiSparseFlags);
if (eError != PVRSRV_OK)
{
@@ -2486,9 +3125,52 @@ PVRSRV_ERROR PMR_ChangeSparseMem(PMR *psPMR,
OSGetCurrentClientProcessIDKM());
}
#endif
- goto e0;
+ goto e1;
}
+#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE)
+ if (pvZombiePages != NULL)
+ {
+ PVRSRV_DEV_POWER_STATE ePowerState;
+ PVRSRV_DEVICE_NODE *psDevNode;
+
+ psDevNode = PhysHeapDeviceNode(psPMR->psPhysHeap);
+ eError = PVRSRVGetDevicePowerState(psDevNode, &ePowerState);
+ if (eError != PVRSRV_OK)
+ {
+ /* Treat unknown power state as ON. */
+ ePowerState = PVRSRV_DEV_POWER_STATE_ON;
+ }
+
+ if (ePowerState == PVRSRV_DEV_POWER_STATE_OFF ||
+ psDevNode->eDevState == PVRSRV_DEVICE_STATE_PCI_ERROR)
+ {
+ /* Free preallocated psPMRZombiePages as these won't be used */
+ OSFreeMem(psPMRZombiePages);
+
+ eError = psPMR->psFuncTab->pfnFreeZombiePages(pvZombiePages);
+ PVR_LOG_GOTO_IF_ERROR(eError, "Error when trying to free zombies immediately.", e0);
+ }
+ else
+ {
+ PVR_ASSERT(psPMRZombiePages != NULL);
+ psPMRZombiePages->sHeader.eZombieType = PMR_ZOMBIE_TYPE_PAGES;
+ psPMRZombiePages->pfnFactoryFreeZombies = psPMR->psFuncTab->pfnFreeZombiePages;
+ psPMRZombiePages->pvFactoryPages = pvZombiePages;
+
+ _ZombieListLock(psDevNode);
+ dllist_add_to_tail(&psDevNode->sPMRZombieList, &psPMRZombiePages->sHeader.sZombieNode);
+ psDevNode->uiPMRZombieCount++;
+ _ZombieListUnlock(psDevNode);
+ }
+ }
+ else
+ {
+ /* Free psPMRZombiePages as the sparse change has not produced zombie pages */
+ OSFreeMem(psPMRZombiePages);
+ }
+#endif
+
#if defined(PDUMP)
{
IMG_BOOL bInitialise = IMG_FALSE;
@@ -2517,8 +3199,15 @@ PVRSRV_ERROR PMR_ChangeSparseMem(PMR *psPMR,
#endif
+ return PVRSRV_OK;
+e1:
+#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE)
+ if (uiSparseFlags & SPARSE_RESIZE_FREE)
+ {
+ OSFreeMem(psPMRZombiePages);
+ }
e0:
- PMRUnlockPMR(psPMR);
+#endif
return eError;
}
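A minimal usage sketch of the new unlocked variant, assuming a caller that must hold the PMR lock across a larger compound operation; the variables and the surrounding work are hypothetical, and SPARSE_RESIZE_FREE is taken from the hunk above:

PMRLockPMR(psPMR);
/* ... other layout work that must be atomic with the resize ... */
eError = PMR_ChangeSparseMemUnlocked(psPMR,
                                     0, NULL,            /* nothing to allocate */
                                     ui32FreePageCount,  /* pages to release */
                                     pai32FreeIndices,
                                     SPARSE_RESIZE_FREE);
PMRUnlockPMR(psPMR);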
@@ -4170,23 +4859,91 @@ PMRFreeZombies(PPVRSRV_DEVICE_NODE psDeviceNode)
DLLIST_NODE *psThis, *psNext;
IMG_INT32 uiZombieCount;
- OSLockAcquire(psDeviceNode->hPMRZombieListLock);
+ _ZombieListLock(psDeviceNode);
/* Move the zombie list to a local copy. The original list will become
* an empty list. This will allow us to process the list without holding
* the list lock. */
dllist_replace_head(&psDeviceNode->sPMRZombieList, &sZombieList);
uiZombieCount = psDeviceNode->uiPMRZombieCount;
psDeviceNode->uiPMRZombieCount = 0;
- OSLockRelease(psDeviceNode->hPMRZombieListLock);
+ _ZombieListUnlock(psDeviceNode);
dllist_foreach_node(&sZombieList, psThis, psNext)
{
- PMR *psPMR = IMG_CONTAINER_OF(psThis, PMR, sZombieNode);
+ dllist_remove_node(psThis);
+ switch (PMR_GetZombieTypeFromNode(psThis))
+ {
+#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE)
+ case PMR_ZOMBIE_TYPE_PAGES:
+ {
+ PVRSRV_ERROR eError;
+ PMR_ZOMBIE_PAGES* psZombiePages = PMR_GetZombiePagesFromNode(psThis);
- dllist_remove_node(&psPMR->sZombieNode);
+ eError = psZombiePages->pfnFactoryFreeZombies(psZombiePages->pvFactoryPages);
+ if (eError != PVRSRV_OK)
+ {
+ /* If the zombie pages cannot be freed, add the node (already
+ * removed from the local sZombieList copy) back to the device's
+ * zombie list so the free can be retried later. */
+ _ZombieListLock(psDeviceNode);
+ dllist_add_to_tail(&psDeviceNode->sPMRZombieList, psThis);
+ psDeviceNode->uiPMRZombieCount++;
+ _ZombieListUnlock(psDeviceNode);
- _PMRDestroy(psPMR);
+ PVR_DPF((PVR_DBG_ERROR, "Cannot free zombie pages!"));
+ continue;
+ }
+ OSFreeMem(psZombiePages);
+ break;
+ }
+#endif
+
+#if defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE)
+ case PMR_ZOMBIE_TYPE_DEVICE_IMPORT:
+ {
+ PMR_DEVICE_IMPORT *psImport = PMR_GetDeviceImportFromNode(psThis);
+ _DeviceImportFreeImportZombie(psImport);
+ break;
+ }
+#endif /* defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE) */
+
+ case PMR_ZOMBIE_TYPE_PMR:
+ {
+ PMR *psPMR = PMR_GetPMRFromNode(psThis);
+ const PMR_IMPL_FUNCTAB *psFuncTable = psPMR->psFuncTab;
+
+#if defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE)
+ /* The PMR cannot be freed as other devices are
+ * still waiting for the cache flush. */
+ PMRLockPMR(psPMR);
+ if (_DeviceImportBitmapGet(psPMR) != 0)
+ {
+ PDLLIST_NODE psNodeImport;
+ PMR_DEVICE_IMPORT *psImport;
+ /* Transfer the ownership to a different
+ * device queue that has not been processed yet.
+ * There will be a PMR_DEVICE_IMPORT on the same
+ * queue; however, this doesn't have any knock-on effects as
+ * it will be freed before the PMR is reached again. */
+ psNodeImport = dllist_get_next_node(&psPMR->sXDeviceImports);
+ PVR_ASSERT(psNodeImport);
+ psImport = IMG_CONTAINER_OF(psNodeImport, PMR_DEVICE_IMPORT, sNext);
+ _ZombieListLock(psImport->psDevNode);
+ dllist_add_to_tail(&psImport->psDevNode->sPMRZombieList, psThis);
+ psImport->psDevNode->uiPMRZombieCount++;
+ _ZombieListUnlock(psImport->psDevNode);
+ PMRUnlockPMR(psPMR);
+ break;
+ }
+ PMRUnlockPMR(psPMR);
+#endif /* defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE) */
+
+ _FactoryLock(psFuncTable);
+ _PMRDestroy(psPMR);
+ _FactoryUnlock(psFuncTable);
+ break;
+ }
+ }
uiZombieCount--;
}
@@ -4198,20 +4955,48 @@ PMRDumpZombies(PPVRSRV_DEVICE_NODE psDeviceNode)
{
DLLIST_NODE *psThis, *psNext;
- OSLockAcquire(psDeviceNode->hPMRZombieListLock);
+ _ZombieListLock(psDeviceNode);
PVR_DPF((PVR_DBG_ERROR, "Items in zombie list: %u",
psDeviceNode->uiPMRZombieCount));
dllist_foreach_node(&psDeviceNode->sPMRZombieList, psThis, psNext)
{
- PMR *psPMR = IMG_CONTAINER_OF(psThis, PMR, sZombieNode);
+ switch (PMR_GetZombieTypeFromNode(psThis))
+ {
+#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE)
+ case PMR_ZOMBIE_TYPE_PAGES:
+ {
+ PMR_ZOMBIE_PAGES* psZombiePages = PMR_GetZombiePagesFromNode(psThis);
+ PVR_DPF((PVR_DBG_ERROR, "Zombie Pages = %p", psZombiePages));
+ break;
+ }
+#endif
- PVR_DPF((PVR_DBG_ERROR, "PMR = %px, Flavour = %s, Annotation: %s",
- psPMR, PMR_GetTypeStr(psPMR), PMR_GetAnnotation(psPMR)));
+#if defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE)
+ case PMR_ZOMBIE_TYPE_DEVICE_IMPORT:
+ {
+ PMR_DEVICE_IMPORT* psImport = PMR_GetDeviceImportFromNode(psThis);
+ PVR_DPF((PVR_DBG_ERROR, "Device Import = %p, DevID = %u, PMR = %px (%s)",
+ psImport,
+ psImport->psDevNode->sDevId.ui32InternalID,
+ psImport->psParent,
+ PMR_GetAnnotation(psImport->psParent)));
+ break;
+ }
+#endif /* defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE) */
+
+ case PMR_ZOMBIE_TYPE_PMR:
+ {
+ PMR *psPMR = PMR_GetPMRFromNode(psThis);
+ PVR_DPF((PVR_DBG_ERROR, "PMR = %px, Flavour = %s, Annotation: %s",
+ psPMR, PMR_GetTypeStr(psPMR), PMR_GetAnnotation(psPMR)));
+ break;
+ }
+ }
}
- OSLockRelease(psDeviceNode->hPMRZombieListLock);
+ _ZombieListUnlock(psDeviceNode);
}
void
@@ -4222,3 +5007,23 @@ PMRDeInitDevice(PPVRSRV_DEVICE_NODE psDeviceNode)
OSLockDestroy(psDeviceNode->hPMRZombieListLock);
}
#endif /* defined(SUPPORT_PMR_DEFERRED_FREE) */
+
+#if defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE)
+PVRSRV_ERROR
+PMR_RegisterDeviceImport(PMR* psPMR, PPVRSRV_DEVICE_NODE psDevNode)
+{
+ PVR_ASSERT(!PMR_IsZombie(psPMR));
+
+ if (PMR_DeviceNode(psPMR) != psDevNode)
+ {
+ PVRSRV_ERROR eError = _DeviceImportRegister(psPMR, psDevNode);
+ PVR_LOG_RETURN_IF_ERROR(eError, "_DeviceImportRegister");
+ }
+ /* else: We explicitly don't add the PMR's dev node to the list because
+ * this bitmask lets us know if the PMR is cross device. It's not
+ * an error to register with the original dev node, as the user is
+ * declaring "The PMR is using `psDevNode`", not that it's a new
+ * devnode. */
+ return PVRSRV_OK;
+}
+#endif /* defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE) */
diff --git a/drivers/gpu/img-rogue/23.2/services/server/common/pvrsrv.c b/drivers/gpu/img-rogue/23.2/services/server/common/pvrsrv.c
index afa7f6a..86be6ff 100644
--- a/drivers/gpu/img-rogue/23.2/services/server/common/pvrsrv.c
+++ b/drivers/gpu/img-rogue/23.2/services/server/common/pvrsrv.c
@@ -2219,15 +2219,6 @@ PVRSRV_ERROR PVRSRVCommonDeviceCreate(void *pvOSDevice,
}
#endif
- /* Initialise the paravirtualised connection */
- if (!PVRSRV_VZ_MODE_IS(NATIVE))
- {
- PvzConnectionInit();
- PVR_GOTO_IF_ERROR(eError, ErrorSysDevDeInit);
- }
-
- BIT_SET(psDevConfig->psDevNode->ui32VmState, RGXFW_HOST_DRIVER_ID);
-
/* Next update value will be 0xFFFFFFF7 since sync prim starts with 0xFFFFFFF6.
* Has to be set before call to PMRInitDevice(). */
psDeviceNode->ui32NextMMUInvalidateUpdate = 0xFFFFFFF7U;
@@ -2237,7 +2228,7 @@ PVRSRV_ERROR PVRSRVCommonDeviceCreate(void *pvOSDevice,
#endif
eError = PVRSRVRegisterDeviceDbgTable(psDeviceNode);
- PVR_GOTO_IF_ERROR(eError, ErrorPvzConnectionDeInit);
+ PVR_GOTO_IF_ERROR(eError, ErrorSysDevDeInit);
eError = PVRSRVPowerLockInit(psDeviceNode);
PVR_GOTO_IF_ERROR(eError, ErrorUnregisterDbgTable);
@@ -2373,12 +2364,21 @@ PVRSRV_ERROR PVRSRVCommonDeviceCreate(void *pvOSDevice,
psPVRSRVData->ui32RegisteredDevices++;
OSWRLockReleaseWrite(psPVRSRVData->hDeviceNodeListLock);
+ /* Initialise the paravirtualised connection */
+ if (!PVRSRV_VZ_MODE_IS(NATIVE))
+ {
+ PvzConnectionInit();
+ PVR_GOTO_IF_ERROR(eError, ErrorRegisterDVFSDeviceFail);
+ }
+
+ BIT_SET(psDevConfig->psDevNode->ui32VmState, RGXFW_HOST_DRIVER_ID);
+
*ppsDeviceNode = psDeviceNode;
#if defined(SUPPORT_LINUX_DVFS) && !defined(NO_HARDWARE)
/* Register the DVFS device now the device node is present in the dev-list */
eError = RegisterDVFSDevice(psDeviceNode);
- PVR_LOG_GOTO_IF_ERROR(eError, "RegisterDVFSDevice", ErrorRegisterDVFSDeviceFail);
+ PVR_LOG_GOTO_IF_ERROR(eError, "RegisterDVFSDevice", ErrorPvzConnectionDeInit);
#endif
#if defined(PVRSRV_ENABLE_PROCESS_STATS)
@@ -2395,7 +2395,15 @@ PVRSRV_ERROR PVRSRVCommonDeviceCreate(void *pvOSDevice,
return PVRSRV_OK;
#if defined(SUPPORT_LINUX_DVFS) && !defined(NO_HARDWARE)
+ErrorPvzConnectionDeInit:
+#endif
+ psDevConfig->psDevNode = NULL;
+ if (!PVRSRV_VZ_MODE_IS(NATIVE))
+ {
+ PvzConnectionDeInit();
+ }
ErrorRegisterDVFSDeviceFail:
+#if defined(SUPPORT_LINUX_DVFS) && !defined(NO_HARDWARE)
/* Remove the device from the list */
OSWRLockAcquireWrite(psPVRSRVData->hDeviceNodeListLock);
List_PVRSRV_DEVICE_NODE_Remove(psDeviceNode);
@@ -2455,12 +2463,6 @@ PVRSRV_ERROR PVRSRVCommonDeviceCreate(void *pvOSDevice,
PVRSRVPowerLockDeInit(psDeviceNode);
ErrorUnregisterDbgTable:
PVRSRVUnregisterDeviceDbgTable(psDeviceNode);
-ErrorPvzConnectionDeInit:
- psDevConfig->psDevNode = NULL;
- if (!PVRSRV_VZ_MODE_IS(NATIVE))
- {
- PvzConnectionDeInit();
- }
ErrorSysDevDeInit:
SysDevDeInit(psDevConfig);
ErrorDeregisterStats:
diff --git a/drivers/gpu/img-rogue/23.2/services/server/common/sync_server.c b/drivers/gpu/img-rogue/23.2/services/server/common/sync_server.c
index 12a7d1e..1e9a0ec 100644
--- a/drivers/gpu/img-rogue/23.2/services/server/common/sync_server.c
+++ b/drivers/gpu/img-rogue/23.2/services/server/common/sync_server.c
@@ -546,6 +546,12 @@ PVRSRVSyncRecordAddKM(CONNECTION_DATA *psConnection,
PVR_UNREFERENCED_PARAMETER(psConnection);
+ if (!(GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Full sync tracking debug feature not enabled!", __func__));
+ return PVRSRV_ERROR_NOT_SUPPORTED;
+ }
+
RGXSRV_HWPERF_ALLOC(psDevNode, SYNC,
ui32FwBlockAddr + ui32SyncOffset,
pszClassName,
@@ -616,6 +622,12 @@ PVRSRVSyncRecordRemoveByHandleKM(
struct SYNC_RECORD *pSync = (struct SYNC_RECORD*)hRecord;
PVRSRV_DEVICE_NODE *psDevNode;
+ if (!(GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Full sync tracking debug feature not enabled!", __func__));
+ return PVRSRV_ERROR_NOT_SUPPORTED;
+ }
+
PVR_RETURN_IF_INVALID_PARAM(hRecord);
psDevNode = pSync->psDevNode;
diff --git a/drivers/gpu/img-rogue/23.2/services/server/devices/rgxdebug_common.c b/drivers/gpu/img-rogue/23.2/services/server/devices/rgxdebug_common.c
index 8684309..9657e0a 100644
--- a/drivers/gpu/img-rogue/23.2/services/server/devices/rgxdebug_common.c
+++ b/drivers/gpu/img-rogue/23.2/services/server/devices/rgxdebug_common.c
@@ -811,7 +811,7 @@ static PVRSRV_ERROR _ValidateWithFWModule(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPri
if (pui32FWCode[i] != ui32Value)
{
- PVR_DUMPDEBUG_LOG("%s: Mismatch while validating %s at offset 0x%x: CPU 0x%08x (%p), FW 0x%08x (%x)",
+ PVR_DUMPDEBUG_LOG("%s: Mismatch while validating %s at offset 0x%x: CPU 0x%08x ("IMG_KM_PTR_FMTSPEC"), FW 0x%08x (%x)",
__func__, pszDesc,
(i * 4) + ui32StartOffset, pui32FWCode[i], pui32FWCode, ui32Value, ui32FWCodeDevVAAddr);
return PVRSRV_ERROR_FW_IMAGE_MISMATCH;
@@ -1844,7 +1844,7 @@ void RGXDebugRequestProcess(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
bRGXPoweredON = (ePowerState == PVRSRV_DEV_POWER_STATE_ON);
PVR_DUMPDEBUG_LOG("------[ RGX Info ]------");
- PVR_DUMPDEBUG_LOG("Device Node (Info): %p (%p)", psDevInfo->psDeviceNode, psDevInfo);
+ PVR_DUMPDEBUG_LOG("Device Node (Info): "IMG_KM_PTR_FMTSPEC" ("IMG_KM_PTR_FMTSPEC")", psDevInfo->psDeviceNode, psDevInfo);
DevicememHistoryDumpRecordStats(psDevInfo->psDeviceNode, pfnDumpDebugPrintf, pvDumpDebugFile);
PVR_DUMPDEBUG_LOG("RGX BVNC: %d.%d.%d.%d (%s)", psDevInfo->sDevFeatureCfg.ui32B,
psDevInfo->sDevFeatureCfg.ui32V,
diff --git a/drivers/gpu/img-rogue/23.2/services/server/devices/rogue/rgxdebug.c b/drivers/gpu/img-rogue/23.2/services/server/devices/rogue/rgxdebug.c
index 23382dc..1f5e93a 100644
--- a/drivers/gpu/img-rogue/23.2/services/server/devices/rogue/rgxdebug.c
+++ b/drivers/gpu/img-rogue/23.2/services/server/devices/rogue/rgxdebug.c
@@ -3643,13 +3643,13 @@ PVRSRV_ERROR RGXDumpRGXRegisters(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
PVRSRV_ERROR eError;
PVR_DUMPDEBUG_LOG("------[ RGX registers ]------");
- PVR_DUMPDEBUG_LOG("RGX Register Base Address (Linear): 0x%p", psDevInfo->pvRegsBaseKM);
+ PVR_DUMPDEBUG_LOG("RGX Register Base Address (Linear): 0x"IMG_KM_PTR_FMTSPEC, psDevInfo->pvRegsBaseKM);
PVR_DUMPDEBUG_LOG("RGX Register Base Address (Physical): 0x%08lX", (unsigned long)psDevInfo->sRegsPhysBase.uiAddr);
#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX)
if (RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) > 1)
{
- PVR_DUMPDEBUG_LOG("RGX Host Secure Register Base Address (Linear): 0x%p",
+ PVR_DUMPDEBUG_LOG("RGX Host Secure Register Base Address (Linear): 0x"IMG_KM_PTR_FMTSPEC,
psDevInfo->pvSecureRegsBaseKM);
PVR_DUMPDEBUG_LOG("RGX Host Secure Register Base Address (Physical): 0x%08lX",
(unsigned long)psDevInfo->sRegsPhysBase.uiAddr + RGX_HOST_SECURE_REGBANK_OFFSET);
diff --git a/drivers/gpu/img-rogue/23.2/services/server/devices/rogue/rgxinit.c b/drivers/gpu/img-rogue/23.2/services/server/devices/rogue/rgxinit.c
index 391a67c..b10712b 100644
--- a/drivers/gpu/img-rogue/23.2/services/server/devices/rogue/rgxinit.c
+++ b/drivers/gpu/img-rogue/23.2/services/server/devices/rogue/rgxinit.c
@@ -4356,7 +4356,7 @@ static PVRSRV_ERROR RGXInitPrivateFwPhysHeaps(PVRSRV_DEVICE_NODE *psDeviceNode)
PVR_LOG_GOTO_IF_FALSE((psFwCodeHeapCfg != NULL) && (psFwDataHeapCfg != NULL),
"Security support requires Fw code and data memory be"
- " separate from the heap shared with the kernel driver.", ErrorDeinit);
+ " separate from the heap shared with the kernel driver.", FailDeinit);
if (psFwCodeHeapCfg != psFwDataHeapCfg)
{
@@ -4371,8 +4371,8 @@ static PVRSRV_ERROR RGXInitPrivateFwPhysHeaps(PVRSRV_DEVICE_NODE *psDeviceNode)
PVR_LOG_GOTO_IF_FALSE((psFwSharedHeapCfg->uiSize +
ui64FwPrivateHeapSize) ==
- RGX_FIRMWARE_RAW_HEAP_SIZE,
- "Invalid firmware physical heap size.", ErrorDeinit);
+ RGX_FIRMWARE_RAW_HEAP_SIZE,
+ "Invalid firmware physical heap size.", FailDeinit);
}
#endif
@@ -4386,6 +4386,10 @@ static PVRSRV_ERROR RGXInitPrivateFwPhysHeaps(PVRSRV_DEVICE_NODE *psDeviceNode)
return eError;
+#if defined(RGX_PREMAP_FW_HEAPS) && defined(SUPPORT_TRUSTED_DEVICE)
+FailDeinit:
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+#endif
ErrorDeinit:
PVR_ASSERT(IMG_FALSE);
diff --git a/drivers/gpu/img-rogue/23.2/services/server/devices/volcanic/rgxdebug.c b/drivers/gpu/img-rogue/23.2/services/server/devices/volcanic/rgxdebug.c
index 9d391cb..928716e 100644
--- a/drivers/gpu/img-rogue/23.2/services/server/devices/volcanic/rgxdebug.c
+++ b/drivers/gpu/img-rogue/23.2/services/server/devices/volcanic/rgxdebug.c
@@ -2416,13 +2416,13 @@ PVRSRV_ERROR RGXDumpRGXRegisters(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
PVRSRV_ERROR eError;
PVR_DUMPDEBUG_LOG("------[ RGX registers ]------");
- PVR_DUMPDEBUG_LOG("RGX Register Base Address (Linear): 0x%p", psDevInfo->pvRegsBaseKM);
+ PVR_DUMPDEBUG_LOG("RGX Register Base Address (Linear): 0x"IMG_KM_PTR_FMTSPEC, psDevInfo->pvRegsBaseKM);
PVR_DUMPDEBUG_LOG("RGX Register Base Address (Physical): 0x%08lX", (unsigned long)psDevInfo->sRegsPhysBase.uiAddr);
#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX)
if (RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) > 1)
{
- PVR_DUMPDEBUG_LOG("RGX Host Secure Register Base Address (Linear): 0x%p",
+ PVR_DUMPDEBUG_LOG("RGX Host Secure Register Base Address (Linear): 0x"IMG_KM_PTR_FMTSPEC,
psDevInfo->pvSecureRegsBaseKM);
PVR_DUMPDEBUG_LOG("RGX Host Secure Register Base Address (Physical): 0x%08lX",
(unsigned long)psDevInfo->sRegsPhysBase.uiAddr + RGX_HOST_SECURE_REGBANK_OFFSET);
diff --git a/drivers/gpu/img-rogue/23.2/services/server/devices/volcanic/rgxinit.c b/drivers/gpu/img-rogue/23.2/services/server/devices/volcanic/rgxinit.c
index 1f15c7b..73cb05d 100644
--- a/drivers/gpu/img-rogue/23.2/services/server/devices/volcanic/rgxinit.c
+++ b/drivers/gpu/img-rogue/23.2/services/server/devices/volcanic/rgxinit.c
@@ -4267,7 +4267,7 @@ static PVRSRV_ERROR RGXInitPrivateFwPhysHeaps(PVRSRV_DEVICE_NODE *psDeviceNode)
PVR_LOG_GOTO_IF_FALSE((psFwCodeHeapCfg != NULL) && (psFwDataHeapCfg != NULL),
"Security support requires Fw code and data memory be"
- " separate from the heap shared with the kernel driver.", ErrorDeinit);
+ " separate from the heap shared with the kernel driver.", FailDeinit);
if (psFwCodeHeapCfg != psFwDataHeapCfg)
{
@@ -4282,8 +4282,8 @@ static PVRSRV_ERROR RGXInitPrivateFwPhysHeaps(PVRSRV_DEVICE_NODE *psDeviceNode)
PVR_LOG_GOTO_IF_FALSE((psFwSharedHeapCfg->uiSize +
ui64FwPrivateHeapSize) ==
- RGX_FIRMWARE_RAW_HEAP_SIZE,
- "Invalid firmware physical heap size.", ErrorDeinit);
+ RGX_FIRMWARE_RAW_HEAP_SIZE,
+ "Invalid firmware physical heap size.", FailDeinit);
}
#endif
@@ -4297,6 +4297,10 @@ static PVRSRV_ERROR RGXInitPrivateFwPhysHeaps(PVRSRV_DEVICE_NODE *psDeviceNode)
return eError;
+#if defined(RGX_PREMAP_FW_HEAPS) && defined(SUPPORT_TRUSTED_DEVICE)
+FailDeinit:
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+#endif
ErrorDeinit:
PVR_ASSERT(IMG_FALSE);
diff --git a/drivers/gpu/img-rogue/23.2/services/server/env/linux/osfunc.c b/drivers/gpu/img-rogue/23.2/services/server/env/linux/osfunc.c
index f45bef4..ddf0332 100644
--- a/drivers/gpu/img-rogue/23.2/services/server/env/linux/osfunc.c
+++ b/drivers/gpu/img-rogue/23.2/services/server/env/linux/osfunc.c
@@ -68,6 +68,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#include <linux/kthread.h>
#include <linux/utsname.h>
#include <linux/scatterlist.h>
+#include <linux/pid.h>
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
#include <linux/pfn_t.h>
#include <linux/pfn.h>
@@ -78,6 +79,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#else
#include <linux/sched.h>
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) */
+
#if defined(SUPPORT_SECURE_ALLOC_KM)
#if defined(PVR_ANDROID_HAS_DMA_HEAP_FIND)
#include <linux/dma-heap.h>
@@ -837,6 +839,22 @@ IMG_CHAR *OSGetCurrentClientProcessNameKM(void)
return OSGetCurrentProcessName();
}
+uintptr_t OSAcquireCurrentPPIDResourceRefKM(void)
+{
+ struct pid *psPPIDResource = find_pid_ns(OSGetCurrentClientProcessIDKM(), &init_pid_ns);
+
+ PVR_ASSERT(psPPIDResource != NULL);
+ /* Take a reference on psPPIDResource so it cannot be freed/reused */
+ get_pid(psPPIDResource);
+ return (uintptr_t)psPPIDResource;
+}
+
+void OSReleasePPIDResourceRefKM(uintptr_t psPPIDResource)
+{
+ /* Drop the reference on psPPIDResource */
+ put_pid((struct pid*)psPPIDResource);
+}
+
uintptr_t OSGetCurrentClientThreadIDKM(void)
{
return OSGetCurrentThreadID();
diff --git a/drivers/gpu/img-rogue/23.2/services/server/env/linux/physmem_dmabuf.c b/drivers/gpu/img-rogue/23.2/services/server/env/linux/physmem_dmabuf.c
index 9c84219..1fbddd7 100644
--- a/drivers/gpu/img-rogue/23.2/services/server/env/linux/physmem_dmabuf.c
+++ b/drivers/gpu/img-rogue/23.2/services/server/env/linux/physmem_dmabuf.c
@@ -165,6 +165,7 @@ typedef struct _PMR_DMA_BUF_DATA_
struct dma_buf_attachment *psAttachment;
PFN_DESTROY_DMABUF_PMR pfnDestroy;
IMG_BOOL bPoisonOnFree;
+ IMG_PID uiOriginPID;
/* Mapping information. */
struct iosys_map sMap;
@@ -305,14 +306,14 @@ static void PMRFinalizeDmaBuf(PMR_IMPL_PRIVDATA pvPriv)
{
PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_ZOMBIE,
psPrivData->ui32PhysPageCount << PAGE_SHIFT,
- OSGetCurrentClientProcessIDKM());
+ psPrivData->uiOriginPID);
}
else
#endif
{
PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_IMPORT,
psPrivData->ui32PhysPageCount << PAGE_SHIFT,
- OSGetCurrentClientProcessIDKM());
+ psPrivData->uiOriginPID);
}
#endif
@@ -358,12 +359,10 @@ static PVRSRV_ERROR PMRZombifyDmaBufMem(PMR_IMPL_PRIVDATA pvPriv, PMR *psPMR)
#if defined(PVRSRV_ENABLE_PROCESS_STATS)
PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_IMPORT,
psPrivData->ui32PhysPageCount << PAGE_SHIFT,
- OSGetCurrentClientProcessIDKM());
+ psPrivData->uiOriginPID);
PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_ZOMBIE,
psPrivData->ui32PhysPageCount << PAGE_SHIFT,
- OSGetCurrentClientProcessIDKM());
-#else
- PVR_UNREFERENCED_PARAMETER(pvPriv);
+ psPrivData->uiOriginPID);
#endif
PVRSRVIonZombifyMemAllocRecord(psDmaBuf);
@@ -378,9 +377,17 @@ static PVRSRV_ERROR PMRLockPhysAddressesDmaBuf(PMR_IMPL_PRIVDATA pvPriv)
return PVRSRV_OK;
}
+#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE)
+static PVRSRV_ERROR PMRUnlockPhysAddressesDmaBuf(PMR_IMPL_PRIVDATA pvPriv,
+ PMR_IMPL_ZOMBIEPAGES *ppvZombiePages)
+#else
static PVRSRV_ERROR PMRUnlockPhysAddressesDmaBuf(PMR_IMPL_PRIVDATA pvPriv)
+#endif
{
PVR_UNREFERENCED_PARAMETER(pvPriv);
+#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE)
+ *ppvZombiePages = NULL;
+#endif
return PVRSRV_OK;
}
@@ -620,6 +627,7 @@ PhysmemCreateNewDmaBufBackedPMR(PHYS_HEAP *psHeap,
psPrivData->psAttachment = psAttachment;
psPrivData->pfnDestroy = pfnDestroy;
psPrivData->bPoisonOnFree = bPoisonOnFree;
+ psPrivData->uiOriginPID = OSGetCurrentClientProcessIDKM();
psPrivData->ui32VirtPageCount =
(ui32NumVirtChunks * uiChunkSize) >> PAGE_SHIFT;
@@ -752,7 +760,7 @@ PhysmemCreateNewDmaBufBackedPMR(PHYS_HEAP *psHeap,
#if defined(PVRSRV_ENABLE_PROCESS_STATS)
PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_IMPORT,
psPrivData->ui32PhysPageCount << PAGE_SHIFT,
- OSGetCurrentClientProcessIDKM());
+ psPrivData->uiOriginPID);
#endif
uiPMRFlags = (PMR_FLAGS_T)(uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK);
@@ -1079,7 +1087,25 @@ PhysmemImportSparseDmaBuf(CONNECTION_DATA *psConnection,
#if defined(SUPPORT_PMR_DEFERRED_FREE)
if (PMR_IsZombie(psPMR))
{
- PMRDequeueZombieAndRef(psPMR);
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+ PMR_DMA_BUF_DATA *psPrivData = PMRGetPrivateData(psPMR, &_sPMRDmaBufFuncTab);
+#endif
+
+ PMRReviveZombieAndRef(psPMR);
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+ if (psPrivData != NULL)
+ {
+ PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_IMPORT,
+ psPrivData->ui32PhysPageCount << PAGE_SHIFT,
+ psPrivData->uiOriginPID);
+ PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_ZOMBIE,
+ psPrivData->ui32PhysPageCount << PAGE_SHIFT,
+ psPrivData->uiOriginPID);
+ }
+#endif
+
+ PVRSRVIonReviveMemAllocRecord(psDmaBuf);
}
else
#endif /* defined(SUPPORT_PMR_DEFERRED_FREE) */
@@ -1089,13 +1115,31 @@ PhysmemImportSparseDmaBuf(CONNECTION_DATA *psConnection,
PMRRefPMR(psPMR);
}
+ /* If an existing PMR is found, the table wasn't created by this
+ * function call, so the error path can be safely ignored and
+ * the factory lock can be dropped. */
+ PVR_ASSERT(bHashTableCreated == IMG_FALSE);
+ dma_buf_put(psDmaBuf);
+ PMRFactoryUnlock();
+
+#if defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE)
+ /* The device import can only be registered on an alive & healthy PMR
+ * therefore we wait for the potential zombie to be dequeued first. */
+ eError = PMR_RegisterDeviceImport(psPMR, psDevNode);
+ if (eError != PVRSRV_OK)
+ {
+ /* The factory lock might be taken in PMRUnrefPMR. */
+ (void) PMRUnrefPMR(psPMR);
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to register PMR with device: %u",
+ __func__, psDevNode->sDevId.ui32InternalID));
+ goto errReturn;
+ }
+#endif /* defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE) */
+
*ppsPMRPtr = psPMR;
PMR_LogicalSize(psPMR, puiSize);
*puiAlign = PAGE_SIZE;
- PMRFactoryUnlock();
- dma_buf_put(psDmaBuf);
-
/* We expect a PMR to be immutable at this point.
* But its explicitly set here to cover a corner case
* where a PMR created through non-DMA interface could be
diff --git a/drivers/gpu/img-rogue/23.2/services/server/env/linux/physmem_extmem_linux.c b/drivers/gpu/img-rogue/23.2/services/server/env/linux/physmem_extmem_linux.c
index 8ae96a6..7de1200 100644
--- a/drivers/gpu/img-rogue/23.2/services/server/env/linux/physmem_extmem_linux.c
+++ b/drivers/gpu/img-rogue/23.2/services/server/env/linux/physmem_extmem_linux.c
@@ -611,6 +611,15 @@ PMRWriteBytesExtMem(PMR_IMPL_PRIVDATA pvPriv,
size_t uiBufSz,
size_t *puiNumBytes)
{
+ PMR_WRAP_DATA *psWrapData = (PMR_WRAP_DATA*) pvPriv;
+
+ if (!BITMASK_HAS(psWrapData->psVMArea->vm_flags, VM_WRITE))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Attempt to write to read only vma.",
+ __func__));
+ return PVRSRV_ERROR_PMR_NOT_PERMITTED;
+ }
+
return _CopyBytesExtMem(pvPriv,
uiOffset,
pcBuffer,
@@ -883,6 +892,14 @@ static inline PVRSRV_ERROR PhysmemValidateParam( IMG_DEVMEM_SIZE_T uiSize,
return PVRSRV_ERROR_UNSUPPORTED_CACHE_MODE;
}
+ if (uiFlags & PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Device specific flags not supported. "
+ "Passed Flags: 0x%"PVRSRV_MEMALLOCFLAGS_FMTSPEC,
+ __func__, uiFlags));
+ return PVRSRV_ERROR_INVALID_FLAGS;
+ }
+
#if !defined(PVRSRV_WRAP_EXTMEM_WRITE_ATTRIB_ENABLE)
if (uiFlags & (PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
diff --git a/drivers/gpu/img-rogue/23.2/services/server/env/linux/physmem_osmem_linux.c b/drivers/gpu/img-rogue/23.2/services/server/env/linux/physmem_osmem_linux.c
index 21a0f98..62e1071 100644
--- a/drivers/gpu/img-rogue/23.2/services/server/env/linux/physmem_osmem_linux.c
+++ b/drivers/gpu/img-rogue/23.2/services/server/env/linux/physmem_osmem_linux.c
@@ -2927,6 +2927,168 @@ _FreeOSPages_Fast(PMR_OSPAGEARRAY_DATA *psPageArrayData)
return PVRSRV_OK;
}
+#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE)
+static PVRSRV_ERROR PMRFreeZombiePagesOSMem(PMR_IMPL_ZOMBIEPAGES pvPriv)
+{
+ PVRSRV_ERROR eError;
+ PMR_OSPAGEARRAY_DATA *psZombiePageArray = pvPriv;
+
+ eError = _FreeOSPages(psZombiePageArray,
+ NULL,
+ 0 /* Unused */);
+
+ if (eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+
+ _FreeOSPagesArray(psZombiePageArray);
+
+ return PVRSRV_OK;
+e0:
+ return eError;
+}
+
+/* Extracts ui32ExtractPageCount pages, referenced by pai32ExtractIndices, from psSrcPageArrayData.
+ * Allocates a new PMR_OSPAGEARRAY_DATA object and fills it with the extracted page information.
+ * Pages in this context are device page size and are handled as such.
+ */
+static PVRSRV_ERROR
+_ExtractPages(PMR_OSPAGEARRAY_DATA *psSrcPageArrayData,
+ IMG_UINT32 *pai32ExtractIndices,
+ IMG_UINT32 ui32ExtractPageCount,
+ PMR_OSPAGEARRAY_DATA** psOutPageArrayData)
+{
+ PVRSRV_ERROR eError;
+ IMG_UINT32 i;
+ PMR_OSPAGEARRAY_DATA* psDstPageArrayData;
+ IMG_UINT32 uiOrder;
+
+ /* Alloc PMR_OSPAGEARRAY_DATA for the extracted pages */
+ eError = _AllocOSPageArray(psSrcPageArrayData->psDevNode,
+ (IMG_UINT64)ui32ExtractPageCount << psSrcPageArrayData->uiLog2AllocPageSize,
+ ui32ExtractPageCount,
+ ui32ExtractPageCount,
+ psSrcPageArrayData->uiLog2AllocPageSize,
+ psSrcPageArrayData->ui32AllocFlags,
+ psSrcPageArrayData->ui32CPUCacheFlags,
+ psSrcPageArrayData->uiPid,
+ &psDstPageArrayData);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "_AllocOSPageArray failed in _ExtractPages"));
+ return eError;
+ }
+
+ uiOrder = psSrcPageArrayData->uiLog2AllocPageSize - PAGE_SHIFT;
+
+ /* Transfer from src pagearray to dst pagearray */
+ for (i = 0; i < ui32ExtractPageCount; i++)
+ {
+ IMG_UINT32 idxSrc = pai32ExtractIndices[i];
+
+ if (psSrcPageArrayData->pagearray[idxSrc] != NULL)
+ {
+ psDstPageArrayData->pagearray[i] = psSrcPageArrayData->pagearray[idxSrc];
+ psSrcPageArrayData->pagearray[idxSrc] = NULL;
+ }
+ }
+
+ /* Do the same for dmaphysarray and dmavirtarray if allocated with CMA */
+ if (BIT_ISSET(psSrcPageArrayData->ui32AllocFlags, FLAG_IS_CMA))
+ {
+ for (i = 0; i < ui32ExtractPageCount; i++)
+ {
+ IMG_UINT32 idxSrc = pai32ExtractIndices[i];
+
+ if (psSrcPageArrayData->dmaphysarray[idxSrc] != (dma_addr_t)0 ||
+ psSrcPageArrayData->dmavirtarray[idxSrc] != NULL)
+ {
+ psDstPageArrayData->dmaphysarray[i] = psSrcPageArrayData->dmaphysarray[idxSrc];
+ psDstPageArrayData->dmavirtarray[i] = psSrcPageArrayData->dmavirtarray[idxSrc];
+
+ psSrcPageArrayData->dmaphysarray[idxSrc] = (dma_addr_t)0;
+ psSrcPageArrayData->dmavirtarray[idxSrc] = NULL;
+ }
+ }
+ }
+
+ /* Update page counts */
+ psSrcPageArrayData->iNumOSPagesAllocated -= ui32ExtractPageCount << uiOrder;
+ psDstPageArrayData->iNumOSPagesAllocated += ui32ExtractPageCount << uiOrder;
+
+ *psOutPageArrayData = psDstPageArrayData;
+ return PVRSRV_OK;
+}
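A worked illustration of the transfer above, with hypothetical values (a 4-page source array, pai32ExtractIndices = {1, 3}, ui32ExtractPageCount = 2):

/* Before: src->pagearray = { P0, P1, P2, P3 }
 * After:  src->pagearray = { P0, NULL, P2, NULL }
 *         dst->pagearray = { P1, P3 }
 * Counts: src->iNumOSPagesAllocated -= 2 << uiOrder;
 *         dst->iNumOSPagesAllocated += 2 << uiOrder;
 * For FLAG_IS_CMA allocations the dmaphysarray/dmavirtarray entries move
 * in the same way.
 */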
+
+/* Extracts all allocated pages referenced by psSrcPageArrayData.
+ * Allocates a new PMR_OSPAGEARRAY_DATA object and fills it with the extracted
+ * page information.
+ */
+static PVRSRV_ERROR
+_ExtractAllPages(PMR_OSPAGEARRAY_DATA *psSrcPageArrayData,
+ PMR_OSPAGEARRAY_DATA **psOutPageArrayData)
+{
+ PVRSRV_ERROR eError;
+ IMG_UINT32 i;
+ PMR_OSPAGEARRAY_DATA* psDstPageArrayData;
+ IMG_UINT32 uiPagesCopied = 0;
+
+ /* Alloc PMR_OSPAGEARRAY_DATA for the extracted pages */
+ eError = _AllocOSPageArray(psSrcPageArrayData->psDevNode,
+ (IMG_UINT64)psSrcPageArrayData->uiTotalNumOSPages << psSrcPageArrayData->uiLog2AllocPageSize,
+ psSrcPageArrayData->iNumOSPagesAllocated,
+ psSrcPageArrayData->uiTotalNumOSPages,
+ psSrcPageArrayData->uiLog2AllocPageSize,
+ psSrcPageArrayData->ui32AllocFlags,
+ psSrcPageArrayData->ui32CPUCacheFlags,
+ psSrcPageArrayData->uiPid,
+ &psDstPageArrayData);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "_AllocOSPageArray failed in _ExtractAllPages"));
+ return eError;
+ }
+
+ /* Transfer from src pagearray to dst pagearray */
+ /* Iterate through all pages in psSrcPageArrayData but stop once
+ * we have copied psSrcPageArrayData->iNumOSPagesAllocated pages to
+ * psDstPageArrayData.
+ */
+ for (i = 0; ((i < psSrcPageArrayData->uiTotalNumOSPages) &&
+ (uiPagesCopied < psSrcPageArrayData->iNumOSPagesAllocated)); i++)
+ {
+ if (psSrcPageArrayData->pagearray[i] != NULL)
+ {
+ psDstPageArrayData->pagearray[uiPagesCopied] =
+ psSrcPageArrayData->pagearray[i];
+
+ psSrcPageArrayData->pagearray[i] = NULL;
+
+ if (BIT_ISSET(psSrcPageArrayData->ui32AllocFlags, FLAG_IS_CMA) &&
+ (psSrcPageArrayData->dmaphysarray[i] != (dma_addr_t)0 ||
+ psSrcPageArrayData->dmavirtarray[i] != NULL))
+ {
+ psDstPageArrayData->dmaphysarray[uiPagesCopied] =
+ psSrcPageArrayData->dmaphysarray[i];
+ psDstPageArrayData->dmavirtarray[uiPagesCopied] =
+ psSrcPageArrayData->dmavirtarray[i];
+
+ psSrcPageArrayData->dmaphysarray[i] = (dma_addr_t)0;
+ psSrcPageArrayData->dmavirtarray[i] = NULL;
+ }
+ uiPagesCopied++;
+ }
+ }
+ /* Update page counts */
+ psDstPageArrayData->iNumOSPagesAllocated = psSrcPageArrayData->iNumOSPagesAllocated;
+ psSrcPageArrayData->iNumOSPagesAllocated = 0;
+
+ *psOutPageArrayData = psDstPageArrayData;
+ return PVRSRV_OK;
+}
+#endif /* defined(SUPPORT_PMR_PAGES_DEFERRED_FREE) */
+
/* Free pages from a page array.
* Takes care of mem stats and chooses correct free path depending on parameters. */
static PVRSRV_ERROR
@@ -3124,15 +3286,45 @@ PMRLockSysPhysAddressesOSMem(PMR_IMPL_PRIVDATA pvPriv)
return eError;
}
+#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE)
+static PVRSRV_ERROR
+PMRUnlockSysPhysAddressesOSMem(PMR_IMPL_PRIVDATA pvPriv,
+ PMR_IMPL_ZOMBIEPAGES *ppvZombiePages)
+#else
static PVRSRV_ERROR
PMRUnlockSysPhysAddressesOSMem(PMR_IMPL_PRIVDATA pvPriv)
+#endif
{
/* Just drops the refcount. */
PVRSRV_ERROR eError = PVRSRV_OK;
PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pvPriv;
+#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE)
+ PMR_OSPAGEARRAY_DATA *psExtractedPagesPageArray = NULL;
+
+ *ppvZombiePages = NULL;
+#endif
if (BIT_ISSET(psOSPageArrayData->ui32AllocFlags, FLAG_ONDEMAND))
{
+#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE)
+ if (psOSPageArrayData->iNumOSPagesAllocated == 0)
+ {
+ *ppvZombiePages = NULL;
+ return PVRSRV_OK;
+ }
+
+ eError = _ExtractAllPages(psOSPageArrayData,
+ &psExtractedPagesPageArray);
+ PVR_LOG_GOTO_IF_ERROR(eError, "_ExtractAllPages", e0);
+
+ if (psExtractedPagesPageArray)
+ {
+ /* Zombify pages to get proper stats */
+ eError = PMRZombifyOSMem(psExtractedPagesPageArray, NULL);
+ PVR_WARN_IF_ERROR(eError, "PMRZombifyOSMem");
+ }
+ *ppvZombiePages = psExtractedPagesPageArray;
+#else
/* Free Memory for deferred allocation */
eError = _FreeOSPages(psOSPageArrayData,
NULL,
@@ -3141,8 +3333,12 @@ PMRUnlockSysPhysAddressesOSMem(PMR_IMPL_PRIVDATA pvPriv)
{
return eError;
}
+#endif
}
+#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE)
+e0:
+#endif
PVR_ASSERT(eError == PVRSRV_OK);
return eError;
}
@@ -3460,6 +3656,9 @@ PMRChangeSparseMemOSMem(PMR_IMPL_PRIVDATA pPriv,
IMG_UINT32 *pai32AllocIndices,
IMG_UINT32 ui32FreePageCount,
IMG_UINT32 *pai32FreeIndices,
+#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE)
+ PMR_IMPL_ZOMBIEPAGES *ppvZombiePages,
+#endif
IMG_UINT32 uiFlags)
{
PVRSRV_ERROR eError;
@@ -3512,6 +3711,10 @@ PMRChangeSparseMemOSMem(PMR_IMPL_PRIVDATA pPriv,
ui32FreePageCount = 0;
}
+#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE)
+ *ppvZombiePages = NULL;
+#endif
+
if (0 == (ui32CommonRequestCount || ui32AdtnlAllocPages || ui32AdtnlFreePages))
{
eError = PVRSRV_ERROR_INVALID_PARAMS;
@@ -3647,9 +3850,27 @@ PMRChangeSparseMemOSMem(PMR_IMPL_PRIVDATA pPriv,
}
}
- /* Free the additional free pages */
+ /* Free or zombie the additional free pages */
if (0 != ui32AdtnlFreePages)
{
+#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE)
+ PMR_OSPAGEARRAY_DATA *psExtractedPagesPageArray = NULL;
+
+ eError = _ExtractPages(psPMRPageArrayData,
+ &pai32FreeIndices[ui32Loop],
+ ui32AdtnlFreePages,
+ &psExtractedPagesPageArray);
+ if (eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+
+ /* Zombify pages to get proper stats */
+ eError = PMRZombifyOSMem(psExtractedPagesPageArray, NULL);
+ PVR_LOG_IF_ERROR(eError, "PMRZombifyOSMem");
+
+ *ppvZombiePages = psExtractedPagesPageArray;
+#else
eError = _FreeOSPages(psPMRPageArrayData,
&pai32FreeIndices[ui32Loop],
ui32AdtnlFreePages);
@@ -3657,6 +3878,7 @@ PMRChangeSparseMemOSMem(PMR_IMPL_PRIVDATA pPriv,
{
goto e0;
}
+#endif /* SUPPORT_PMR_PAGES_DEFERRED_FREE */
psPMRMapTable->ui32NumPhysChunks -= ui32AdtnlFreePages;
while (ui32Loop < ui32FreePageCount)
{
@@ -3713,6 +3935,9 @@ static PMR_IMPL_FUNCTAB _sPMROSPFuncTab = {
.pfnChangeSparseMem = &PMRChangeSparseMemOSMem,
.pfnChangeSparseMemCPUMap = &PMRChangeSparseMemCPUMapOSMem,
.pfnFinalize = &PMRFinalizeOSMem,
+#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE)
+ .pfnFreeZombiePages = &PMRFreeZombiePagesOSMem,
+#endif
#if defined(SUPPORT_PMR_DEFERRED_FREE)
.pfnZombify = &PMRZombifyOSMem,
#endif
diff --git a/drivers/gpu/img-rogue/23.2/services/server/env/linux/pvr_fence.c b/drivers/gpu/img-rogue/23.2/services/server/env/linux/pvr_fence.c
index 5638dad..07c149a 100644
--- a/drivers/gpu/img-rogue/23.2/services/server/env/linux/pvr_fence.c
+++ b/drivers/gpu/img-rogue/23.2/services/server/env/linux/pvr_fence.c
@@ -1139,7 +1139,7 @@ u32 pvr_fence_dump_info_on_stalled_ufos(struct pvr_fence_context *fctx,
/* Dump sync info */
PVR_DUMPDEBUG_LOG(pfnDummy, NULL,
- "\tSyncID = %d, FWAddr = 0x%08x: TLID = %d (Foreign Fence - [%p] %s)",
+ "\tSyncID = %d, FWAddr = 0x%08x: TLID = %d (Foreign Fence - ["IMG_KM_PTR_FMTSPEC"] %s)",
SyncCheckpointGetId(checkpoint),
fence_ufo_addr,
SyncCheckpointGetTimeline(checkpoint),
diff --git a/drivers/gpu/img-rogue/23.2/services/server/env/linux/pvr_ion_stats.c b/drivers/gpu/img-rogue/23.2/services/server/env/linux/pvr_ion_stats.c
index b145c93..6b5ad15 100644
--- a/drivers/gpu/img-rogue/23.2/services/server/env/linux/pvr_ion_stats.c
+++ b/drivers/gpu/img-rogue/23.2/services/server/env/linux/pvr_ion_stats.c
@@ -587,4 +587,31 @@ void PVRSRVIonZombifyMemAllocRecord(const struct dma_buf *psDmaBuf)
out:
OSLockRelease(psState->hBuffersLock);
}
+
+void PVRSRVIonReviveMemAllocRecord(const struct dma_buf *psDmaBuf)
+{
+ PVR_ION_STATS_STATE *psState = &gPvrIonStatsState;
+ PVR_ION_STATS_BUF *psBuf;
+
+ if (!psDmaBuf) {
+ PVR_DPF((PVR_DBG_ERROR, "Invalid dma buffer"));
+ return;
+ }
+
+ /* We're only interested in ION buffers */
+ if (isIonBuf(psDmaBuf) == IMG_FALSE)
+ return;
+
+ OSLockAcquire(psState->hBuffersLock);
+ psBuf = GetBuf(&psState->buffers, (uintptr_t)psDmaBuf);
+ if (!psBuf) {
+ PVR_DPF((PVR_DBG_ERROR, "Failed to find dma buffer"));
+ goto out;
+ }
+
+ psBuf->bZombie = IMG_FALSE;
+
+out:
+ OSLockRelease(psState->hBuffersLock);
+}
#endif
diff --git a/drivers/gpu/img-rogue/23.2/services/server/env/linux/pvr_ion_stats.h b/drivers/gpu/img-rogue/23.2/services/server/env/linux/pvr_ion_stats.h
index 1c0d44a..c474813 100644
--- a/drivers/gpu/img-rogue/23.2/services/server/env/linux/pvr_ion_stats.h
+++ b/drivers/gpu/img-rogue/23.2/services/server/env/linux/pvr_ion_stats.h
@@ -59,6 +59,7 @@ void PVRSRVIonRemoveMemAllocRecord(struct dma_buf *psDmaBuf);
#if defined(SUPPORT_PMR_DEFERRED_FREE)
void PVRSRVIonZombifyMemAllocRecord(const struct dma_buf *psDmaBuf);
+void PVRSRVIonReviveMemAllocRecord(const struct dma_buf *psDmaBuf);
#endif
#else
static INLINE PVRSRV_ERROR PVRSRVIonStatsInitialise(void)
@@ -85,6 +86,10 @@ static INLINE void PVRSRVIonZombifyMemAllocRecord(const struct dma_buf *psDmaBuf
{
PVR_UNREFERENCED_PARAMETER(psDmaBuf);
}
+static INLINE void PVRSRVIonReviveMemAllocRecord(const struct dma_buf *psDmaBuf)
+{
+ PVR_UNREFERENCED_PARAMETER(psDmaBuf);
+}
#endif
#endif /* defined(PVRSRV_ENABLE_PVR_ION_STATS) */
diff --git a/drivers/gpu/img-rogue/23.2/services/server/env/linux/pvr_sync_file.c b/drivers/gpu/img-rogue/23.2/services/server/env/linux/pvr_sync_file.c
index e365610..385872d 100644
--- a/drivers/gpu/img-rogue/23.2/services/server/env/linux/pvr_sync_file.c
+++ b/drivers/gpu/img-rogue/23.2/services/server/env/linux/pvr_sync_file.c
@@ -1036,7 +1036,7 @@ static void _dump_sync_point(struct dma_fence *fence,
PVR_DUMPDEBUG_LOG(dump_debug_printf,
dump_debug_file,
- "<%p> Seq#=%llu TS=%s State=%s TLN=%s",
+ "<"IMG_KM_PTR_FMTSPEC"> Seq#=%llu TS=%s State=%s TLN=%s",
fence,
(u64) fence->seqno,
time,
@@ -1055,7 +1055,7 @@ static void _dump_fence(struct dma_fence *fence,
if (fence_array) {
PVR_DUMPDEBUG_LOG(dump_debug_printf,
dump_debug_file,
- "Fence: [%p] Sync Points:\n",
+ "Fence: ["IMG_KM_PTR_FMTSPEC"] Sync Points:\n",
fence_array);
for (i = 0; i < fence_array->num_fences; i++)
diff --git a/drivers/gpu/img-rogue/23.2/services/server/include/handle.h b/drivers/gpu/img-rogue/23.2/services/server/include/handle.h
index 92946b6..7ce28a5 100644
--- a/drivers/gpu/img-rogue/23.2/services/server/include/handle.h
+++ b/drivers/gpu/img-rogue/23.2/services/server/include/handle.h
@@ -152,6 +152,7 @@ typedef struct _PROCESS_HANDLE_BASE_
{
PVRSRV_HANDLE_BASE *psHandleBase;
ATOMIC_T iRefCount;
+ uintptr_t uiHashKey;
} PROCESS_HANDLE_BASE;
extern PVRSRV_HANDLE_BASE *gpsKernelHandleBase;
@@ -195,10 +196,13 @@ PVRSRV_ERROR PVRSRVHandleInit(void);
PVRSRV_ERROR PVRSRVHandleDeInit(void);
+/* Only called from sync_fallback_server.c */
+#if defined(SUPPORT_FALLBACK_FENCE_SYNC)
PVRSRV_HANDLE_BASE *PVRSRVRetrieveProcessHandleBase(void);
+#endif
-PVRSRV_ERROR PVRSRVAcquireProcessHandleBase(IMG_PID uiPid, PROCESS_HANDLE_BASE **ppsBase);
-PVRSRV_ERROR PVRSRVReleaseProcessHandleBase(PROCESS_HANDLE_BASE *psBase, IMG_PID uiPid, IMG_UINT64 ui64MaxBridgeTime);
+PVRSRV_ERROR PVRSRVAcquireProcessHandleBase(PROCESS_HANDLE_BASE **ppsBase);
+PVRSRV_ERROR PVRSRVReleaseProcessHandleBase(PROCESS_HANDLE_BASE *psBase, IMG_UINT64 ui64MaxBridgeTime);
void LockHandle(PVRSRV_HANDLE_BASE *psBase);
void UnlockHandle(PVRSRV_HANDLE_BASE *psBase);
diff --git a/drivers/gpu/img-rogue/23.2/services/server/include/mmu_common.h b/drivers/gpu/img-rogue/23.2/services/server/include/mmu_common.h
index b14046d..797ce6e 100644
--- a/drivers/gpu/img-rogue/23.2/services/server/include/mmu_common.h
+++ b/drivers/gpu/img-rogue/23.2/services/server/include/mmu_common.h
@@ -512,7 +512,7 @@ MMUX_MapVRangeToBackingPage(MMU_CONTEXT *psMMUContext,
PVRSRV_ERROR
MMU_MapPMRFast(MMU_CONTEXT *psMMUContext,
IMG_DEV_VIRTADDR sDevVAddr,
- const PMR *psPMR,
+ PMR *psPMR,
IMG_DEVMEM_SIZE_T uiSizeBytes,
PVRSRV_MEMALLOCFLAGS_T uiMappingFlags,
IMG_UINT32 uiLog2PageSize);
diff --git a/drivers/gpu/img-rogue/23.2/services/server/include/osfunc.h b/drivers/gpu/img-rogue/23.2/services/server/include/osfunc.h
index 07e8a4d..d2b67d3 100644
--- a/drivers/gpu/img-rogue/23.2/services/server/include/osfunc.h
+++ b/drivers/gpu/img-rogue/23.2/services/server/include/osfunc.h
@@ -684,6 +684,32 @@ IMG_PID OSGetCurrentClientProcessIDKM(void);
IMG_CHAR *OSGetCurrentClientProcessNameKM(void);
/*************************************************************************/ /*!
+@Function OSAcquireCurrentPPIDResourceRefKM
+@Description Returns a unique process identifier for the current client
+ parent process (thread group) and takes a reference on it
+ (if required) to prevent it being freed/re-allocated.
+ This value may then be used as a unique reference to the
+ process rather than using the PID value, which might be
+ reallocated to represent another process after process
+ destruction.
+ Note that the value to be returned is an address relating to
+ the parent process (thread group) and not to just one thread.
+ It is the caller's responsibility to ensure the reference is
+ subsequently dropped (by calling OSReleasePPIDResourceRefKM())
+ to allow it to be freed when no longer required.
+@Return Address of a kernel resource allocated for the current client
+ parent process (thread group)
+*****************************************************************************/
+uintptr_t OSAcquireCurrentPPIDResourceRefKM(void);
+
+/*************************************************************************/ /*!
+@Function OSReleasePPIDResourceRefKM
+@Description Drops a reference on the unique process identifier provided.
+@Return None
+*****************************************************************************/
+void OSReleasePPIDResourceRefKM(uintptr_t psPPIDResource);
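A hedged usage sketch of the pair; tying the returned value to a hash key is an assumption suggested by the uiHashKey field added to PROCESS_HANDLE_BASE in handle.h, not something these declarations state:

/* Pin a stable identifier for the current client's parent process. */
uintptr_t uiPPIDResource = OSAcquireCurrentPPIDResourceRefKM();

/* ... use uiPPIDResource, e.g. as a hash key that stays valid even if
 * the numeric PID is recycled for a new process ... */

/* Drop the reference once the identifier is no longer needed. */
OSReleasePPIDResourceRefKM(uiPPIDResource);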
+
+/*************************************************************************/ /*!
@Function OSGetCurrentClientThreadIDKM
@Description Returns ID for current client thread
For some operating systems, this may simply be the current
diff --git a/drivers/gpu/img-rogue/23.2/services/server/include/pmr.h b/drivers/gpu/img-rogue/23.2/services/server/include/pmr.h
index ac331ba..e4d64ea 100644
--- a/drivers/gpu/img-rogue/23.2/services/server/include/pmr.h
+++ b/drivers/gpu/img-rogue/23.2/services/server/include/pmr.h
@@ -531,6 +531,34 @@ PMRCpuMapCountDecr(PMR *psPMR);
IMG_BOOL
PMR_IsCpuMapped(PMR *psPMR);
+/*
+ * PMRGpuResCountIncr()
+ *
+ * Increment count of the number of current GPU reservations associated with the PMR.
+ * Must be protected by PMR lock.
+ */
+void
+PMRGpuResCountIncr(PMR *psPMR);
+
+/*
+ * PMRGpuResCountDecr()
+ *
+ * Decrement count of the number of current GPU reservations associated with the PMR.
+ * Must be protected by PMR lock.
+ *
+ */
+void
+PMRGpuResCountDecr(PMR *psPMR);
+
+/*
+ * PMR_IsGpuMultiMapped()
+ *
+ * Must be protected by PMR lock.
+ *
+ */
+IMG_BOOL
+PMR_IsGpuMultiMapped(PMR *psPMR);
+
PPVRSRV_DEVICE_NODE
PMR_DeviceNode(const PMR *psPMR);
@@ -685,16 +713,32 @@ IMG_BOOL
PMRQueueZombiesForCleanup(PPVRSRV_DEVICE_NODE psDevNode);
/*
- * PMRDequeueZombieAndRef
+ * PMRReviveZombieAndRef
*
* Removes the PMR either from the zombie list or the cleanup item's list
* and references it.
*/
void
-PMRDequeueZombieAndRef(PMR *psPMR);
+PMRReviveZombieAndRef(PMR *psPMR);
#endif /* defined(SUPPORT_PMR_DEFERRED_FREE) */
/*
+ * PMR_ChangeSparseMemUnlocked()
+ *
+ * See note above about Lock/Unlock semantics.
+ *
+ * This function alters the memory map of the given PMR in device space by
+ * adding/deleting the pages as requested. PMR lock must be taken
+ * before calling this function.
+ *
+ */
+PVRSRV_ERROR PMR_ChangeSparseMemUnlocked(PMR *psPMR,
+ IMG_UINT32 ui32AllocPageCount,
+ IMG_UINT32 *pai32AllocIndices,
+ IMG_UINT32 ui32FreePageCount,
+ IMG_UINT32 *pai32FreeIndices,
+ IMG_UINT32 uiSparseFlags);
+/*
* PMR_ChangeSparseMem()
*
* See note above about Lock/Unlock semantics.
@@ -1155,4 +1199,17 @@ PMRGetIPAInfo(PMR *psPMR, IMG_UINT32 *pui32IPAPolicy, IMG_UINT32 *pui32IPAShift,
IMG_UINT32 *pui32IPAMask, IMG_UINT32 *pui32IPAFlagsValue);
#endif
+#if defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE)
+/*
+ *
+ * PMR_RegisterDeviceImport()
+ *
+ * Register the PMR with the device node.
+ * This is required for the PMR to be marked as an XD (cross-device) PMR.
+ * Silently succeeds if the PMR is already registered with the device.
+ */
+PVRSRV_ERROR
+PMR_RegisterDeviceImport(PMR* psPMR, PPVRSRV_DEVICE_NODE psDevNode);
+#endif /* defined(SUPPORT_PMR_DEVICE_IMPORT_DEFERRED_FREE) */
+
#endif /* #ifdef SRVSRV_PMR_H */
diff --git a/drivers/gpu/img-rogue/23.2/services/server/include/pmr_impl.h b/drivers/gpu/img-rogue/23.2/services/server/include/pmr_impl.h
index 68089dd..3fc7949 100644
--- a/drivers/gpu/img-rogue/23.2/services/server/include/pmr_impl.h
+++ b/drivers/gpu/img-rogue/23.2/services/server/include/pmr_impl.h
@@ -79,6 +79,10 @@ typedef struct _PMR_MAPPING_TABLE_ PMR_MAPPING_TABLE;
*/
typedef void *PMR_MMAP_DATA;
+#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE)
+typedef void *PMR_IMPL_ZOMBIEPAGES;
+#endif
+
#define PMR_IMPL_TYPES \
X(NONE), \
X(OSMEM), \
@@ -135,11 +139,22 @@ typedef PVRSRV_ERROR (*PFN_LOCK_PHYS_ADDRESSES_FN)(PMR_IMPL_PRIVDATA pvPriv);
@Input pvPriv Private data (which was generated by the
PMR factory when PMR was created)
+@Output ppvZombiePages Zombie pages object. If non-NULL is returned,
+ the caller is obligated to call pfnFreeZombiePages
+ at an appropriate time to prevent memory leaks.
+ If support for deferred freeing of pages is not
+ provided, the implementation must set
+ *ppvZombiePages to NULL.
@Return PVRSRV_OK if the operation was successful, an error code
otherwise.
*/ /**************************************************************************/
+#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE)
+typedef PVRSRV_ERROR (*PFN_UNLOCK_PHYS_ADDRESSES_FN)(PMR_IMPL_PRIVDATA pvPriv,
+ PMR_IMPL_ZOMBIEPAGES *ppvZombiePages);
+#else
typedef PVRSRV_ERROR (*PFN_UNLOCK_PHYS_ADDRESSES_FN)(PMR_IMPL_PRIVDATA pvPriv);
+#endif
#if defined(PVRSRV_SUPPORT_IPA_FEATURE)
/*************************************************************************/ /*!
@@ -365,6 +380,9 @@ typedef PVRSRV_ERROR (*PFN_WRITE_BYTES_FN)(PMR_IMPL_PRIVDATA pvPriv,
allocation that do not require
a physical allocation.
@Input ui32Flags Allocation flags
+@Output ppvZombiePages Zombie pages object. If non-NULL is returned,
+ the caller is obligated to call pfnFreeZombiePages
+ at an appropriate time to prevent memory leaks.
@Return PVRSRV_OK if the sparse allocation physical backing was updated
successfully, an error code otherwise.
@@ -375,6 +393,9 @@ typedef PVRSRV_ERROR (*PFN_CHANGE_SPARSE_MEM_FN)(PMR_IMPL_PRIVDATA pPriv,
IMG_UINT32 *pai32AllocIndices,
IMG_UINT32 ui32FreePageCount,
IMG_UINT32 *pai32FreeIndices,
+#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE)
+ PMR_IMPL_ZOMBIEPAGES *ppvZombiePages,
+#endif
IMG_UINT32 uiFlags);
/*************************************************************************/ /*!
@@ -507,6 +528,29 @@ typedef PVRSRV_ERROR (*PFN_ZOMBIFY_FN)(PMR_IMPL_PRIVDATA pvPriv,
PMR *psPMR);
#endif
+#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE)
+/*************************************************************************/ /*!
+@Brief Callback function type PFN_FREE_ZOMBIE_PAGES_FN
+
+@Description Called to perform factory actions to free zombie pages object
+ previously returned by PFN_CHANGE_SPARSE_MEM_FN.
+
+ This function should free the pages described in the
+ pvZombiePages parameter and do any associated actions related
+ to freeing such as poisoning or returning to the page pool.
+
+ Implementation of this callback is required when
+ SUPPORT_PMR_PAGES_DEFERRED_FREE=1.
+
+@Return PVRSRV_OK if the operation was successful, an error code
+ otherwise. If an error is returned, the PMR layer might retry.
+ On error, factory implementations should update the contents
+ of the PMR_IMPL_ZOMBIEPAGES object to reflect any changes made
+ to the underlying memory by the initial (failed) call.
+*/ /**************************************************************************/
+typedef PVRSRV_ERROR (*PFN_FREE_ZOMBIE_PAGES_FN)(PMR_IMPL_ZOMBIEPAGES pvZombiePages);
+#endif
+
/*! PMR factory callback table.
*/
struct _PMR_IMPL_FUNCTAB_ {
@@ -533,6 +577,11 @@ struct _PMR_IMPL_FUNCTAB_ {
/*! Callback function pointer, see ::PFN_CHANGE_SPARSE_MEM_CPU_MAP_FN */
PFN_CHANGE_SPARSE_MEM_CPU_MAP_FN pfnChangeSparseMemCPUMap;
+#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE)
+ /*! Callback function pointer, see ::PFN_FREE_ZOMBIE_PAGES_FN */
+ PFN_FREE_ZOMBIE_PAGES_FN pfnFreeZombiePages;
+#endif
+
/*! Callback function pointer, see ::PFN_MMAP_FN */
PFN_MMAP_FN pfnMMap;
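Because PMR_IMPL_ZOMBIEPAGES is an opaque void *, each PMR factory chooses its own representation for deferred pages. A hedged factory-side sketch of the contract follows; MY_ZOMBIE_PAGES and the MyDetachPages()/MyReleasePages() helpers are hypothetical, and only the callback signatures come from pmr_impl.h.

/* Hypothetical factory-private zombie pages object. */
typedef struct _MY_ZOMBIE_PAGES_
{
	void       **ppvPages;  /* backing pages awaiting deferred free */
	IMG_UINT32   uiCount;
} MY_ZOMBIE_PAGES;

static PVRSRV_ERROR MyUnlockPhysAddresses(PMR_IMPL_PRIVDATA pvPriv,
                                          PMR_IMPL_ZOMBIEPAGES *ppvZombiePages)
{
	/* Detach the pages from the private data instead of freeing inline;
	 * MyDetachPages() is hypothetical and may return NULL when there is
	 * nothing to defer, which satisfies the *ppvZombiePages = NULL rule. */
	*ppvZombiePages = MyDetachPages(pvPriv);
	return PVRSRV_OK;
}

static PVRSRV_ERROR MyFreeZombiePages(PMR_IMPL_ZOMBIEPAGES pvZombiePages)
{
	MY_ZOMBIE_PAGES *psZombie = pvZombiePages;

	if (psZombie == NULL)
	{
		return PVRSRV_OK;
	}

	/* Release the deferred pages; on failure, leave psZombie describing
	 * whatever is still outstanding so the PMR layer can retry. */
	return MyReleasePages(psZombie); /* hypothetical */
}

Such a factory would then set .pfnFreeZombiePages = MyFreeZombiePages in its _PMR_IMPL_FUNCTAB_ when built with SUPPORT_PMR_PAGES_DEFERRED_FREE=1.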
diff --git a/drivers/gpu/img-rogue/23.2/services/shared/common/ra.c b/drivers/gpu/img-rogue/23.2/services/shared/common/ra.c
index dc4df4b..e792c1c 100644
--- a/drivers/gpu/img-rogue/23.2/services/shared/common/ra.c
+++ b/drivers/gpu/img-rogue/23.2/services/shared/common/ra.c
@@ -215,6 +215,12 @@ struct _RA_ARENA_ITERATOR_
IMG_BOOL bIncludeFreeSegments;
};
+/* Enum for selecting the behaviour of the _ConvertAndFree* functions */
+typedef enum {
+ CONVERT_AND_FREE = 0,
+ CONVERT_DONT_FREE = 1
+} RA_CONVERT_AND_FREE_BEHAVIOUR;
+
static PVRSRV_ERROR _RA_FreeMultiUnlocked(RA_ARENA *pArena,
RA_BASE_ARRAY_T aBaseArray,
RA_BASE_ARRAY_SIZE_T uiBaseArraySize);
@@ -1146,28 +1152,32 @@ _ConvertGhostBaseToReal(RA_ARENA *pArena,
}
/*************************************************************************/ /*!
- * @Function _FreeGhostBasesFromReal
+ * @Function _ConvertAndFreeStartFromGhost
*
- * @Description Given a ghost base and size, free the contiguous ghost bases from the
- * real base. This has the effect of shrinking the size of the real base.
- * If ghost pages remain after the free region, a new Real base will be
+ * @Description Given a ghost base and size, convert and free contiguous
+ * ghost bases. This has the effect of shrinking the size of
+ * the real base holding the range to free. If ghost pages
+ * remain after the free region, a new Real base will be
* created to host them.
+ *
* @Input pArena - The RA Arena to free the Ghost Bases from.
* @Input aBaseArray - The array to remove bases from
* @Input uiBaseArraySize - The size of the Base array to free from.
* @Input uiChunkSize - The chunk size used to generate the Ghost Bases.
* @Input ui32GhostBaseIndex - The index into the array of the initial Ghost base to free
* @Input ui32FreeCount - The number of Ghost bases to free from the Real base.
+ * @Input eBehaviour - Specifies whether the function should convert and free or only convert.
*
* @Return PVRSRV_OK on Success, PVRSRV_ERROR code on Failure.
*/ /**************************************************************************/
static PVRSRV_ERROR
-_FreeGhostBasesFromReal(RA_ARENA *pArena,
- RA_BASE_ARRAY_T aBaseArray,
- RA_BASE_ARRAY_SIZE_T uiBaseArraySize,
- RA_LENGTH_T uiChunkSize,
- IMG_UINT32 ui32GhostBaseIndex,
- IMG_UINT32 ui32FreeCount)
+_ConvertAndFreeStartFromGhost(RA_ARENA *pArena,
+ RA_BASE_ARRAY_T aBaseArray,
+ RA_BASE_ARRAY_SIZE_T uiBaseArraySize,
+ RA_LENGTH_T uiChunkSize,
+ IMG_UINT32 ui32GhostBaseIndex,
+ IMG_UINT32 ui32FreeCount,
+ RA_CONVERT_AND_FREE_BEHAVIOUR eBehaviour)
{
PVRSRV_ERROR eError;
RA_BASE_T uiRealBase;
@@ -1207,17 +1217,20 @@ _FreeGhostBasesFromReal(RA_ARENA *pArena,
PVR_LOG_RETURN_IF_ERROR(eError, "_ConvertGhostBaseToReal");
}
- /* Free the region calculated */
- eError = _FreeSingleBaseArray(pArena,
- &aBaseArray[ui32GhostBaseIndex],
- ui32FreeCount);
- PVR_LOG_RETURN_IF_ERROR(eError, "_ConvertGhostBaseToReal");
+ if (eBehaviour == CONVERT_AND_FREE)
+ {
+ /* Free the region calculated */
+ eError = _FreeSingleBaseArray(pArena,
+ &aBaseArray[ui32GhostBaseIndex],
+ ui32FreeCount);
+ PVR_LOG_RETURN_IF_ERROR(eError, "_FreeSingleBaseArray");
+ }
return eError;
}
/*************************************************************************/ /*!
- * @Function _ConvertGhostBaseFreeReal
+ * @Function _ConvertAndFreeStartFromReal
*
* @Description Used in the case that we want to keep some indices that are ghost pages
* but the indices to free start with the real base. In this case we can
@@ -1228,15 +1241,17 @@ _FreeGhostBasesFromReal(RA_ARENA *pArena,
* @Input aBaseArray - The Base array to free from.
* @Input uiChunkSize - The chunk size used to generate the Ghost bases.
* @Input uiGhostBaseIndex - The index into the array of the Ghost base to convert.
+ * @Input eBehaviour - Specifies whether the function should convert and free or only convert.
*
* @Return PVRSRV_OK on Success, PVRSRV_ERROR code on Failure.
*/ /**************************************************************************/
static PVRSRV_ERROR
-_ConvertGhostBaseFreeReal(RA_ARENA *pArena,
- RA_BASE_ARRAY_T aBaseArray,
- RA_LENGTH_T uiChunkSize,
- IMG_UINT32 uiRealBaseIndex,
- IMG_UINT32 uiGhostBaseIndex)
+_ConvertAndFreeStartFromReal(RA_ARENA *pArena,
+ RA_BASE_ARRAY_T aBaseArray,
+ RA_LENGTH_T uiChunkSize,
+ IMG_UINT32 uiRealBaseIndex,
+ IMG_UINT32 uiGhostBaseIndex,
+ RA_CONVERT_AND_FREE_BEHAVIOUR eBehaviour)
{
PVRSRV_ERROR eError;
RA_BASE_T uiRealBase = aBaseArray[uiRealBaseIndex];
@@ -1249,18 +1264,22 @@ _ConvertGhostBaseFreeReal(RA_ARENA *pArena,
uiChunkSize);
PVR_LOG_RETURN_IF_ERROR(eError, "_ConvertGhostBaseToReal");
- eError = _FreeSingleBaseArray(pArena,
- &aBaseArray[uiRealBaseIndex],
- uiGhostBaseIndex - uiRealBaseIndex);
- PVR_LOG_RETURN_IF_ERROR(eError, "_FreeBaseArray");
+ if (eBehaviour == CONVERT_AND_FREE)
+ {
+ eError = _FreeSingleBaseArray(pArena,
+ &aBaseArray[uiRealBaseIndex],
+ uiGhostBaseIndex - uiRealBaseIndex);
+ PVR_LOG_RETURN_IF_ERROR(eError, "_FreeSingleBaseArray");
+ }
return eError;
}
/*************************************************************************/ /*!
- * @Function _FreeBaseArraySlice
+ * @Function _ConvertAndFreeBaseArraySlice
*
- * @Description Free Bases in an Array Slice.
+ * @Description Convert and maybe free Bases in an Array Slice.
+ * This function might convert some ghosts into real bases.
* This function assumes that the slice is within a single Real base alloc.
* i.e the uiFreeStartIndex and uiFreeCount remain fully within a single real
* base alloc and do not cross into another Real base region.
@@ -1271,16 +1290,18 @@ _ConvertGhostBaseFreeReal(RA_ARENA *pArena,
* @Input uiChunkSize - The base chunk size used to generate the Ghost bases.
* @Input uiFreeStartIndex - The index in the array to start freeing from
* @Input uiFreeCount - The number of bases to free.
+ * @Input eBehaviour - Specifies whether the function should convert and free or only convert.
*
* @Return PVRSRV_OK on Success, PVRSRV_ERROR code on Failure.
*/ /**************************************************************************/
static PVRSRV_ERROR
-_FreeBaseArraySlice(RA_ARENA *pArena,
- RA_BASE_ARRAY_T aBaseArray,
- RA_BASE_ARRAY_SIZE_T uiBaseArraySize,
- RA_LENGTH_T uiChunkSize,
- IMG_UINT32 uiFreeStartIndex,
- IMG_UINT32 uiFreeCount)
+_ConvertAndFreeBaseArraySlice(RA_ARENA *pArena,
+ RA_BASE_ARRAY_T aBaseArray,
+ RA_BASE_ARRAY_SIZE_T uiBaseArraySize,
+ RA_LENGTH_T uiChunkSize,
+ IMG_UINT32 uiFreeStartIndex,
+ IMG_UINT32 uiFreeCount,
+ RA_CONVERT_AND_FREE_BEHAVIOUR eBehaviour)
{
/* 3 cases:
* Key: () = Region to Free
@@ -1322,32 +1343,37 @@ _FreeBaseArraySlice(RA_ARENA *pArena,
RA_BASE_IS_REAL(aBaseArray[uiFreeStartIndex + uiFreeCount]) ||
RA_BASE_IS_INVALID(aBaseArray[uiFreeStartIndex + uiFreeCount]))
{
- eError = _FreeSingleBaseArray(pArena,
- &aBaseArray[uiFreeStartIndex],
- uiFreeCount);
- PVR_LOG_RETURN_IF_ERROR(eError, "_FreeBaseArray");
+ if (eBehaviour == CONVERT_AND_FREE)
+ {
+ eError = _FreeSingleBaseArray(pArena,
+ &aBaseArray[uiFreeStartIndex],
+ uiFreeCount);
+ PVR_LOG_RETURN_IF_ERROR(eError, "_FreeSingleBaseArray");
+ }
}
/* Case 2*/
else
{
- eError = _ConvertGhostBaseFreeReal(pArena,
- aBaseArray,
- uiChunkSize,
- uiFreeStartIndex,
- uiFreeStartIndex + uiFreeCount);
- PVR_LOG_RETURN_IF_ERROR(eError, "_ConvertGhostBaseToReal");
+ eError = _ConvertAndFreeStartFromReal(pArena,
+ aBaseArray,
+ uiChunkSize,
+ uiFreeStartIndex,
+ uiFreeStartIndex + uiFreeCount,
+ eBehaviour);
+ PVR_LOG_RETURN_IF_ERROR(eError, "_ConvertAndFreeStartFromReal");
}
}
/* Case 3 */
else if (RA_BASE_IS_GHOST(aBaseArray[uiFreeStartIndex]))
{
- eError = _FreeGhostBasesFromReal(pArena,
- aBaseArray,
- uiBaseArraySize,
- uiChunkSize,
- uiFreeStartIndex,
- uiFreeCount);
- PVR_LOG_RETURN_IF_ERROR(eError, "_FreeGhostBasesFromReal");
+ eError = _ConvertAndFreeStartFromGhost(pArena,
+ aBaseArray,
+ uiBaseArraySize,
+ uiChunkSize,
+ uiFreeStartIndex,
+ uiFreeCount,
+ eBehaviour);
+ PVR_LOG_RETURN_IF_ERROR(eError, "_ConvertAndFreeStartFromGhost");
}
/* Attempt to free an invalid base, this could be a duplicated
* value in the free sparse index array */
@@ -2802,13 +2828,14 @@ _RA_FreeMultiUnlockedSparse(RA_ARENA *pArena,
/* Handle case where we only have 1 base to free. */
if (uiFreeCount == 1)
{
- eError = _FreeBaseArraySlice(pArena,
- aBaseArray,
- uiBaseArraySize,
- uiChunkSize,
- puiFreeIndices[0],
- 1);
- PVR_LOG_IF_ERROR(eError, "_FreeBaseArraySlice");
+ eError = _ConvertAndFreeBaseArraySlice(pArena,
+ aBaseArray,
+ uiBaseArraySize,
+ uiChunkSize,
+ puiFreeIndices[0],
+ 1,
+ CONVERT_AND_FREE);
+ PVR_LOG_IF_ERROR(eError, "_ConvertAndFreeBaseArraySlice");
if (eError == PVRSRV_OK)
{
*puiFreeCount = uiFreeCount;
@@ -2832,13 +2859,14 @@ _RA_FreeMultiUnlockedSparse(RA_ARENA *pArena,
uiConsolidate++;
}
- eError = _FreeBaseArraySlice(pArena,
- aBaseArray,
- uiBaseArraySize,
- uiChunkSize,
- puiFreeIndices[i],
- uiConsolidate);
- PVR_LOG_RETURN_IF_ERROR(eError, "_FreeBaseArraySlice");
+ eError = _ConvertAndFreeBaseArraySlice(pArena,
+ aBaseArray,
+ uiBaseArraySize,
+ uiChunkSize,
+ puiFreeIndices[i],
+ uiConsolidate,
+ CONVERT_AND_FREE);
+ PVR_LOG_RETURN_IF_ERROR(eError, "_ConvertAndFreeBaseArraySlice");
i += uiConsolidate;
*puiFreeCount += uiConsolidate;
@@ -2887,6 +2915,151 @@ RA_FreeMultiSparse(RA_ARENA *pArena,
return eError;
}
+#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE)
+/*************************************************************************/ /*!
+ * @Function _RA_RealiseMultiSparseIndicesUnlocked
+ *
+ * @Description Given an array of indices to extract, prepares the base
+ * array so that the indices in puiExtractIndices can be
+ * moved to another base array.
+ * Called when some pages of the base array need to be
+ * transferred to another base array. As a result of this call,
+ * some ghost addresses in aBaseArray might be converted to
+ * real addresses.
+ *
+ * @Input pArena - The RA Arena the bases were allocated from.
+ * @Input aBaseArray - The Base array to extract from.
+ * @Input uiBaseArraySize - The size of the Base array.
+ * @Input uiChunkSize - The Base chunk size used to generate ghost entries.
+ * @Input puiExtractIndices - Array of indices to extract.
+ * @InOut puiExtractCount - Number of indices to extract; updated to the
+ * number actually realised.
+ *
+ * @Return PVRSRV_OK on Success, PVRSRV_ERROR code otherwise.
+*/ /**************************************************************************/
+static PVRSRV_ERROR
+_RA_RealiseMultiSparseIndicesUnlocked(RA_ARENA *pArena,
+ RA_BASE_ARRAY_T aBaseArray,
+ RA_BASE_ARRAY_SIZE_T uiBaseArraySize,
+ RA_LENGTH_T uiChunkSize,
+ IMG_UINT32 *puiExtractIndices,
+ IMG_UINT32 *puiExtractCount)
+{
+ IMG_UINT32 i;
+ PVRSRV_ERROR eError;
+ IMG_UINT32 uiExtractCount = *puiExtractCount;
+ *puiExtractCount = 0;
+
+ /* Handle case where we only have 1 base to extract. */
+ if (uiExtractCount == 1)
+ {
+ eError = _ConvertAndFreeBaseArraySlice(pArena,
+ aBaseArray,
+ uiBaseArraySize,
+ uiChunkSize,
+ puiExtractIndices[0],
+ 1,
+ CONVERT_DONT_FREE);
+ PVR_LOG_IF_ERROR(eError, "_ConvertAndFreeBaseArraySlice");
+ if (eError == PVRSRV_OK)
+ {
+ *puiExtractCount = uiExtractCount;
+ }
+ return eError;
+ }
+
+ for (i = 0; i < uiExtractCount;)
+ {
+ IMG_UINT32 j;
+ IMG_UINT32 uiConsolidate = 1;
+
+ PVR_ASSERT(RA_BASE_IS_REAL(aBaseArray[puiExtractIndices[i]]));
+
+ /* Consolidate the contiguous run of Ghost entries that follows this
+ * base so the whole run is realised with a single slice call. The
+ * bound on j + 1 is checked before the array is dereferenced. */
+ for (j = i;
+ j + 1 < uiExtractCount &&
+ puiExtractIndices[j + 1] == puiExtractIndices[j] + 1 &&
+ RA_BASE_IS_GHOST(aBaseArray[puiExtractIndices[j + 1]]);
+ j++)
+ {
+ uiConsolidate++;
+ }
+
+ eError = _ConvertAndFreeBaseArraySlice(pArena,
+ aBaseArray,
+ uiBaseArraySize,
+ uiChunkSize,
+ puiExtractIndices[i],
+ uiConsolidate,
+ CONVERT_DONT_FREE);
+ PVR_LOG_RETURN_IF_ERROR(eError, "_ConvertAndFreeBaseArraySlice");
+
+ i += uiConsolidate;
+ *puiExtractCount += uiConsolidate;
+ }
+
+ return PVRSRV_OK;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+RA_TransferMultiSparseIndices(RA_ARENA *pArena,
+ RA_BASE_ARRAY_T aSrcBaseArray,
+ RA_BASE_ARRAY_SIZE_T uiSrcBaseArraySize,
+ RA_BASE_ARRAY_T aDstBaseArray,
+ RA_BASE_ARRAY_SIZE_T uiDstBaseArraySize,
+ IMG_UINT32 uiLog2ChunkSize,
+ IMG_UINT32 *puiTransferIndices,
+ IMG_UINT32 *puiTransferCount)
+{
+ PVRSRV_ERROR eError;
+ IMG_UINT32 i;
+
+ PVR_LOG_RETURN_IF_FALSE(puiTransferCount != NULL,
+ "puiTransferCount Required",
+ PVRSRV_ERROR_INVALID_PARAMS);
+
+ /* Ensure the source Base Array is large enough for the intended transfer */
+ PVR_LOG_RETURN_IF_FALSE(uiSrcBaseArraySize >= *puiTransferCount,
+ "Attempt to transfer more bases than src array holds",
+ PVRSRV_ERROR_INVALID_PARAMS);
+
+ /* Ensure the destination Base Array is large enough for the intended transfer */
+ PVR_LOG_RETURN_IF_FALSE(uiDstBaseArraySize >= *puiTransferCount,
+ "Attempt to transfer more bases than dst array holds",
+ PVRSRV_ERROR_INVALID_PARAMS);
+
+ PVR_LOG_RETURN_IF_FALSE(puiTransferIndices != NULL,
+ "puiTransferIndices Required",
+ PVRSRV_ERROR_INVALID_PARAMS);
+
+ PVR_LOG_RETURN_IF_FALSE(uiLog2ChunkSize >= RA_BASE_FLAGS_LOG2 &&
+ uiLog2ChunkSize <= RA_BASE_CHUNK_LOG2_MAX,
+ "Log2 chunk size must be 12-64",
+ PVRSRV_ERROR_INVALID_PARAMS);
+
+ OSLockAcquireNested(pArena->hLock, pArena->ui32LockClass);
+ /* First prepare the base array for subsequent transfer */
+ eError = _RA_RealiseMultiSparseIndicesUnlocked(pArena,
+ aSrcBaseArray,
+ uiSrcBaseArraySize,
+ 1ULL << uiLog2ChunkSize,
+ puiTransferIndices,
+ puiTransferCount);
+ OSLockRelease(pArena->hLock);
+ PVR_GOTO_IF_ERROR(eError, e0);
+
+ /* Now do the transfer */
+ for (i = 0; i < *puiTransferCount; i++)
+ {
+ IMG_UINT32 idxSrc = puiTransferIndices[i];
+ aDstBaseArray[i] = aSrcBaseArray[idxSrc];
+ aSrcBaseArray[idxSrc] = INVALID_BASE_ADDR;
+ }
+
+ return PVRSRV_OK;
+e0:
+ return eError;
+}
+#endif /* defined(SUPPORT_PMR_PAGES_DEFERRED_FREE) */
+
static PVRSRV_ERROR
_TrimBlockMakeReal(RA_ARENA *pArena,
RA_BASE_ARRAY_T aBaseArray,
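A hedged usage sketch of the new transfer entry point: the wrapper below moves three chunks out of one sparse base array into another. The arena, arrays, and indices are placeholders; per the implementation above, the indices must be sorted ascending, the first must reference a Real base (trailing Ghost entries are realised before the move), and the destination must be sized for the transfer count. On success, each destination slot holds a Real base and the corresponding source slot is INVALID_BASE_ADDR.

/* Sketch only: arena and arrays are assumed to exist; 12 is the log2
 * chunk size for 4 KiB chunks. */
static PVRSRV_ERROR ExampleTransferThreeChunks(RA_ARENA *pArena,
                                               RA_BASE_ARRAY_T aSrc,
                                               RA_BASE_ARRAY_SIZE_T uiSrcSize,
                                               RA_BASE_ARRAY_T aDst)
{
	IMG_UINT32 auiIndices[3] = { 4, 5, 6 }; /* placeholder indices into aSrc */
	IMG_UINT32 uiCount = 3;

	/* Ghost entries in the run are realised first, then moved; the
	 * vacated source slots are invalidated as part of the transfer. */
	return RA_TransferMultiSparseIndices(pArena,
	                                     aSrc, uiSrcSize,
	                                     aDst, uiCount, /* dst sized for 3 */
	                                     12,
	                                     auiIndices,
	                                     &uiCount);
}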
diff --git a/drivers/gpu/img-rogue/23.2/services/shared/include/ra.h b/drivers/gpu/img-rogue/23.2/services/shared/include/ra.h
index 654c24d..d62df6d 100644
--- a/drivers/gpu/img-rogue/23.2/services/shared/include/ra.h
+++ b/drivers/gpu/img-rogue/23.2/services/shared/include/ra.h
@@ -70,7 +70,7 @@ typedef struct _RA_ITERATOR_DATA_ {
typedef struct _RA_USAGE_STATS {
IMG_UINT64 ui64TotalArenaSize;
IMG_UINT64 ui64FreeArenaSize;
-}RA_USAGE_STATS, *PRA_USAGE_STATS;
+} RA_USAGE_STATS, *PRA_USAGE_STATS;
/*
* Per-Arena handle - this is private data for the caller of the RA.
@@ -521,6 +521,39 @@ RA_FreeMultiSparse(RA_ARENA *pArena,
IMG_UINT32 *puiFreeIndices,
IMG_UINT32 *puiFreeCount);
+#if defined(SUPPORT_PMR_PAGES_DEFERRED_FREE)
+/**
+ * @Function RA_TransferMultiSparseIndices
+ *
+ * @Description Transfers a set of indices specified in puiTransferIndices from
+ * aSrcBaseArray to aDstBaseArray.
+ * Called when some pages of the base array need to be
+ * transferred to another base array. As a result of this call,
+ * some ghost addresses in aSrcBaseArray might be converted to
+ * real addresses before being transferred.
+ *
+ * @Input pArena - The arena the segment was originally allocated from.
+ * @Input aSrcBaseArray - The array to transfer bases from.
+ * @Input uiSrcBaseArraySize - Size of the array to transfer bases from.
+ * @Input aDstBaseArray - The array to transfer bases to.
+ * @Input uiDstBaseArraySize - Size of the array to transfer bases to.
+ * @Input uiLog2ChunkSize - The log2 chunk size used to generate the Ghost bases.
+ * @Input puiTransferIndices - The indices into the source array to transfer.
+ * @InOut puiTransferCount - The number of bases to transfer; updated to the
+ * number actually prepared.
+ *
+ * @Return PVRSRV_OK on Success, PVRSRV_ERROR code on Failure.
+ */
+PVRSRV_ERROR
+RA_TransferMultiSparseIndices(RA_ARENA *pArena,
+ RA_BASE_ARRAY_T aSrcBaseArray,
+ RA_BASE_ARRAY_SIZE_T uiSrcBaseArraySize,
+ RA_BASE_ARRAY_T aDstBaseArray,
+ RA_BASE_ARRAY_SIZE_T uiDstBaseArraySize,
+ IMG_UINT32 uiLog2ChunkSize,
+ IMG_UINT32 *puiTransferIndices,
+ IMG_UINT32 *puiTransferCount);
+#endif /* defined(SUPPORT_PMR_PAGES_DEFERRED_FREE) */
+
/**
* @Function RA_Alloc_Range
*
diff --git a/drivers/misc/mediatek/hdmi/hdmitx/mt8696/hdmicmd.c b/drivers/misc/mediatek/hdmi/hdmitx/mt8696/hdmicmd.c
index 6f1b798..4d80714 100644
--- a/drivers/misc/mediatek/hdmi/hdmitx/mt8696/hdmicmd.c
+++ b/drivers/misc/mediatek/hdmi/hdmitx/mt8696/hdmicmd.c
@@ -1716,6 +1716,82 @@ void mt_hdmi_set_edid(int en, char *file_name, int len)
HDMI_ATTR_SPRINTF("set test edid done\n");
}
+void mt_hdmi_load_denylist(int en, char *file_name)
+{
+ const struct firmware *fw_entry = NULL;
+ int ret;
+ unsigned int i = 0;
+ unsigned char _all0xFF[sizeof(struct DenyList_DATA)];
+
+ HDMI_ATTR_SPRINTF("filename %s and enable %d\n", file_name, en);
+
+ if (!en) {
+ goto NotUse;
+ }
+
+ /* read file from /vendor/firmware */
+ ret = request_firmware(&fw_entry, file_name, &(hdmi_pdev->dev));
+ if (ret != 0) {
+ HDMI_ATTR_SPRINTF("request %s file fail(%d)!!!\n", file_name, ret);
+ if (fw_entry)
+ release_firmware(fw_entry);
+ goto NotUse;
+ }
+
+ HDMI_ATTR_SPRINTF("filename %s len %lu\n", file_name, fw_entry->size);
+
+ if (fw_entry->size > DENYLIST_FILE_MAX_LEN) {
+ HDMI_ATTR_SPRINTF("invalid content size 0x%lx\n", fw_entry->size);
+ HDMI_ATTR_SPRINTF("The maximun file size is 0x%d\n", DENYLIST_FILE_MAX_LEN);
+ release_firmware(fw_entry);
+ goto NotUse;
+ }
+
+ memset(_all0xFF, 0xFF, sizeof(_all0xFF));
+
+ /* Reject files shorter than one record before the 32-byte compare */
+ if (fw_entry->size < sizeof(struct DenyList_DATA)) {
+ release_firmware(fw_entry);
+ goto NotUse;
+ }
+
+ if (memcmp(fw_entry->data, _all0xFF, sizeof(_all0xFF)) == 0) {
+ HDMI_ATTR_SPRINTF("First 32 bytes are all 0xFF, do not use the file as deny list\n");
+ release_firmware(fw_entry);
+ goto NotUse;
+ }
+
+ memcpy(gDenyList.DL_DATA, fw_entry->data, fw_entry->size);
+ gDenyList.u1NumSize = fw_entry->size / sizeof(struct DenyList_DATA);
+ gDenyList.u1DataFromFile = true;
+ release_firmware(fw_entry);
+
+ for (i = 0; i < gDenyList.u1NumSize; i++) {
+ HDMI_ATTR_SPRINTF("deny list %d u1OpType: 0x%x\n",
+ i, gDenyList.DL_DATA[i].u1OpType);
+ HDMI_ATTR_SPRINTF("deny list %d valid.u1CheckIdManufacturerName: 0x%x\n",
+ i, gDenyList.DL_DATA[i].valid.u1CheckIdManufacturerName);
+ HDMI_ATTR_SPRINTF("deny list %d valid.u1CheckIdProductCode: 0x%x\n",
+ i, gDenyList.DL_DATA[i].valid.u1CheckIdProductCode);
+ HDMI_ATTR_SPRINTF("deny list %d valid.u1CheckYearOfManufacturer: 0x%x\n",
+ i, gDenyList.DL_DATA[i].valid.u1CheckYearOfManufacturer);
+ HDMI_ATTR_SPRINTF("deny list %d valid.u1CheckDolbyTrueHdChType192: 0x%x\n",
+ i, gDenyList.DL_DATA[i].valid.u1CheckDolbyTrueHdChType192);
+ HDMI_ATTR_SPRINTF("deny list %d u1IdManufacturerName[0]: 0x%x\n",
+ i, gDenyList.DL_DATA[i].u1IdManufacturerName[0]);
+ HDMI_ATTR_SPRINTF("deny list %d u1IdManufacturerName[1]: 0x%x\n",
+ i, gDenyList.DL_DATA[i].u1IdManufacturerName[1]);
+ HDMI_ATTR_SPRINTF("deny list %d u1IdProductCode[0]: 0x%x\n",
+ i, gDenyList.DL_DATA[i].u1IdProductCode[0]);
+ HDMI_ATTR_SPRINTF("deny list %d u1IdProductCode[1]: 0x%x\n",
+ i, gDenyList.DL_DATA[i].u1IdProductCode[1]);
+ HDMI_ATTR_SPRINTF("deny list %d u1YearOfManufacturer: 0x%x\n",
+ i, gDenyList.DL_DATA[i].u1YearOfManufacturer);
+ HDMI_ATTR_SPRINTF("deny list %d u1DolbyTrueHdChType192: 0x%x\n",
+ i, gDenyList.DL_DATA[i].u1DolbyTrueHdChType192);
+ }
+
+ HDMI_ATTR_SPRINTF("load deny list done\n");
+ return;
+NotUse:
+ gDenyList.u1DataFromFile = false;
+ HDMI_ATTR_SPRINTF("Not use the file as deny list data\n");
+}
+
void mt_hdmi_show_edid_info(void)
{
unsigned int u4Res = 0;
@@ -4574,7 +4650,7 @@ static void process_dbg_cmd(char *opt)
{
char *oprand;
int ret, i, size, en;
- char testbuf[500] = {0};
+ char testbuf[CMD_TEST_BUF_MAX_LEN] = {0};
struct VID_PLA_HDR_METADATA_INFO_T hdr_metadata;
if (strncmp(opt, "r:", 2) == 0)
@@ -5125,6 +5201,24 @@ static void process_dbg_cmd(char *opt)
fgCaHDMIGetContentStreamManage(&ui4mode);
TX_DEF_LOG("get content stream manage: %x\n", ui4mode);
+ } else if (strncmp(opt, "denylist:", 9) == 0) {
+ char *arg = NULL;
+
+ opt = opt + 9;
+ // input file name
+ arg = strsep(&opt, ",");
+ if (arg == NULL || strlen(arg) > (CMD_TEST_BUF_MAX_LEN-1))
+ goto Error;
+ strncpy(testbuf, arg, (CMD_TEST_BUF_MAX_LEN-1));
+ testbuf[CMD_TEST_BUF_MAX_LEN-1] = '\0';
+
+ arg = strsep(&opt, ",");
+ if (arg == NULL)
+ goto Error;
+ if (kstrtoint(arg, DECIMAL_CONVERT, &en))
+ goto Error;
+
+ mt_hdmi_load_denylist(en, testbuf);
} else
goto Error;
return;
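To exercise the new branch from user space, something along the following lines should work; the debug node path is a placeholder (it depends on how this driver registers its debug entry), and denylist.bin must already be reachable by request_firmware(), e.g. under /vendor/firmware.

/* Sketch: write "denylist:<file>,<enable>" to the HDMI debug node.
 * DEBUG_NODE_PATH is a hypothetical placeholder. */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

#define DEBUG_NODE_PATH "/proc/hdmi" /* placeholder path */

int load_denylist(void)
{
	static const char cmd[] = "denylist:denylist.bin,1";
	int fd = open(DEBUG_NODE_PATH, O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, cmd, strlen(cmd));
	close(fd);
	return (n == (ssize_t)strlen(cmd)) ? 0 : -1;
}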
diff --git a/drivers/misc/mediatek/hdmi/hdmitx/mt8696/hdmiedid.c b/drivers/misc/mediatek/hdmi/hdmitx/mt8696/hdmiedid.c
index 324cf46..d453ee3 100644
--- a/drivers/misc/mediatek/hdmi/hdmitx/mt8696/hdmiedid.c
+++ b/drivers/misc/mediatek/hdmi/hdmitx/mt8696/hdmiedid.c
@@ -140,6 +140,8 @@ const unsigned char _cBitdeepStr[][7] = { {"16bit "},
unsigned char cDstStr[60];
unsigned char cDstBitStr[30];
+struct DenyList gDenyList = {0};
+
bool isBadYUV420Sink(void);
bool fgIsHdmiNoEDIDCheck(void)
@@ -3916,113 +3918,193 @@ long long hdmi_DispGetEdidInfo(void)
return u4Resolution;
}
-bool isThomsonUD9_India_TV(void)
+bool isDenyListSink(enum DenyList_Operation_T OpCode)
{
- /* THOMSON UD9 43TH6000 TV (India only TV) has Dolby audio playback issue.
- * hide Dolby audio capability of this TV based on Manufacturer ID
- * and Manufacturer product code.
- * 00 ff ff ff ff ff ff 00 0e 96 03 b1 01 00 00 00
- */
- if ((_bEdidData[0x08] == 0x0e) && (_bEdidData[0x09] == 0x96) &&
- (_bEdidData[0x0a] == 0x03) && (_bEdidData[0x0b] == 0xb1))
- return true;
+ unsigned int i = 0;
+ bool bRet = false;
+ unsigned int ui4_hdmi_dv_truehd_ch_type =
+ ((_HdmiSinkAvCap.ui1_sink_mat_mlp_ch_sampling[0]) |
+ (_HdmiSinkAvCap.ui1_sink_mat_mlp_ch_sampling[4] << 8) |
+ (_HdmiSinkAvCap.ui1_sink_mat_mlp_ch_sampling[6] << 16));
+ HDMI_PLUG_LOG("ui4_hdmi_dv_truehd_ch_type %d\n", ui4_hdmi_dv_truehd_ch_type);
+
+ for (i = 0; i < gDenyList.u1NumSize; i++) {
+ if (gDenyList.DL_DATA[i].u1OpType == OpCode) {
+ bRet = true;
+ if (gDenyList.DL_DATA[i].valid.u1CheckIdManufacturerName) {
+ if ((_bEdidData[0x08] != gDenyList.DL_DATA[i].u1IdManufacturerName[0]) ||
+ (_bEdidData[0x09] != gDenyList.DL_DATA[i].u1IdManufacturerName[1])) {
+ bRet = false;
+ continue;
+ }
+ }
+
+ if (gDenyList.DL_DATA[i].valid.u1CheckIdProductCode) {
+ if ((_bEdidData[0x0a] != gDenyList.DL_DATA[i].u1IdProductCode[0]) ||
+ (_bEdidData[0x0b] != gDenyList.DL_DATA[i].u1IdProductCode[1])) {
+ bRet = false;
+ continue;
+ }
+ }
+
+ if (gDenyList.DL_DATA[i].valid.u1CheckYearOfManufacturer) {
+ if (_bEdidData[0x11] != gDenyList.DL_DATA[i].u1YearOfManufacturer) {
+ bRet = false;
+ continue;
+ }
+ }
+
+ if (gDenyList.DL_DATA[i].valid.u1CheckDolbyTrueHdChType192) {
+ if ((ui4_hdmi_dv_truehd_ch_type & (1 << 6)) !=
+ (gDenyList.DL_DATA[i].u1DolbyTrueHdChType192 << 6)) {
+ bRet = false;
+ continue;
+ }
+ }
+
+ break;
+ }
+ }
+ return bRet;
+}
+
+bool isBadDolbyAudioSink(void)
+{
+ if (!gDenyList.u1DataFromFile) {
+ /* THOMSON UD9 43TH6000 TV (India-only TV) has a Dolby audio playback issue.
+ * Hide the Dolby audio capability of this TV based on its Manufacturer ID
+ * and Manufacturer product code.
+ * 00 ff ff ff ff ff ff 00 0e 96 03 b1 01 00 00 00
+ */
+ if ((_bEdidData[0x08] == 0x0e) && (_bEdidData[0x09] == 0x96) &&
+ (_bEdidData[0x0a] == 0x03) && (_bEdidData[0x0b] == 0xb1)) {
+ HDMI_AUDIO_LOG(
+ "THOMSON UD9 43TH6000 TV (India only TV) remove Dolby audio capability\n");
+ return true;
+ }
+ } else {
+ if (isDenyListSink(DENYLIST_OP_REMOVE_DOLBY_AUDIO)) {
+ HDMI_AUDIO_LOG("isBadDolbyAudioSink\n");
+ return true;
+ }
+ }
return false;
}
bool isBadDDPlusSink(struct HDMI_EDID_T *pv_get_info)
{
- /* Sony - 2009 model DD+ TV (Sony KDL 40S5600 TV)
- * 00 FF FF FF FF FF FF 00 4D D9 01 C9 01 01 01 01
- * Note: The official document states that this sink does not support DD+.
- */
- if ((_bEdidData[0x08] == 0x4d) && (_bEdidData[0x09] == 0xd9) &&
- (_bEdidData[0x0a] == 0x01) && (_bEdidData[0x0b] == 0xc9))
- return true;
+ if (!gDenyList.u1DataFromFile) {
+ /* Sony - 2009 model DD+ TV (Sony KDL 40S5600 TV)
+ * 00 FF FF FF FF FF FF 00 4D D9 01 C9 01 01 01 01
+ * Note: The official document states that this sink does not support DD+.
+ */
+ if ((_bEdidData[0x08] == 0x4d) && (_bEdidData[0x09] == 0xd9) &&
+ (_bEdidData[0x0a] == 0x01) && (_bEdidData[0x0b] == 0xc9))
+ return true;
+ } else {
+ if (isDenyListSink(DENYLIST_OP_REMOVE_DOLBY_DIGITAL_PLUS)) {
+ HDMI_AUDIO_LOG("isBadDDPlusSink\n");
+ return true;
+ }
+ }
return false;
}
bool isBadMATSink(struct HDMI_EDID_T *pv_get_info)
{
- /* Sony - G/H model MAT TV's has MAT implementation issue
- * confirmed by their SoC vendor MTK and DOLBY.
- * This leads no audio for MAT input.
- * Disable MAT audio capability of these Sony TV model without
- * impacting Sony Soundbar or AVR of those year using sampling rate
- * as differentiator.
- * Note: This sink has no 192kHz Sampleing Rates in MAT audio format.
- */
- if (((_bEdidData[0x08] == 0x4d) && (_bEdidData[0x09] == 0xd9)) &&
- ((_bEdidData[0x11] == 0x1d) || (_bEdidData[0x11] == 0x1e)) &&
- ((pv_get_info->ui4_hdmi_dolby_truehd_ch_type & (1 << 6)) == 0x0))
- return true;
- /* Philips - 2019/2020 model Dolby MAT TV
- * Note: This sink has no 192kHz Sampleing Rates in MAT audio format.
- */
- if (((_bEdidData[0x08] == 0x41) && (_bEdidData[0x09] == 0x0c)) &&
- ((_bEdidData[0x11] == 0x1d) || (_bEdidData[0x11] == 0x1e)) &&
- ((pv_get_info->ui4_hdmi_dolby_truehd_ch_type & (1 << 6)) == 0x0))
- return true;
- /* Panasonic - 2019/2020 model Dolby MAT TV
- * Note: This sink has no 192kHz Sampleing Rates in MAT audio format.
- */
- if (((_bEdidData[0x08] == 0x34) && (_bEdidData[0x09] == 0xa9)) &&
- ((_bEdidData[0x11] == 0x1d) || (_bEdidData[0x11] == 0x1e)) &&
- ((pv_get_info->ui4_hdmi_dolby_truehd_ch_type & (1 << 6)) == 0x0))
- return true;
- /* Hisense - 2018/2019/2020 model Dolby MAT TV
- * Note: This sink has no 192kHz Sampleing Rates in MAT audio format.
- */
- if (((_bEdidData[0x08] == 0x20) && (_bEdidData[0x09] == 0xa3)) &&
- ((_bEdidData[0x11] == 0x1c) || (_bEdidData[0x11] == 0x1d) || (_bEdidData[0x11] == 0x1e)) &&
- ((pv_get_info->ui4_hdmi_dolby_truehd_ch_type & (1 << 6)) == 0x0))
- return true;
- /* AMBEO - 2020 model Dolby MAT TV
- * 00 FF FF FF FF FF FF 00 63 18 15 96 00 00 01 00
- * Note: This sink has 192kHz Sampleing Rates in MAT audio format.
- */
- if ((_bEdidData[0x08] == 0x63) && (_bEdidData[0x09] == 0x18) &&
- (_bEdidData[0x0a] == 0x15) && (_bEdidData[0x0b] == 0x96) &&
- ((pv_get_info->ui4_hdmi_dolby_truehd_ch_type & (1 << 6)) == (0x1 << 6)))
- return true;
- /* MRX 520 - 2015 model AVR
- * 00 FF FF FF FF FF FF 00 06 8d 01 00 7b 03 1e 41
- * Note: Official site presents this sink does not support MAT.
- */
- if ((_bEdidData[0x08] == 0x06) && (_bEdidData[0x09] == 0x8d) &&
- (_bEdidData[0x0a] == 0x01) && (_bEdidData[0x0b] == 0x00) &&
- ((pv_get_info->ui4_hdmi_dolby_truehd_ch_type & (1 << 6)) == (0x1 << 6)))
- return true;
- /* Samsung 32J590UQN with ViewHD VHD-UHAE2 audio extractor
- * 00 ff ff ff ff ff ff 00 4c 2d 34 0f 45 32 39 30
- * Note: This sink does not support MAT audio.
- */
- if ((_bEdidData[0x08] == 0x4c) && (_bEdidData[0x09] == 0x2d) &&
- (_bEdidData[0x0a] == 0x34) && (_bEdidData[0x0b] == 0x0f) &&
- ((pv_get_info->ui4_hdmi_dolby_truehd_ch_type & (1 << 6)) == (0x1 << 6)))
- return true;
- /* Onkyo NR747 - 2015 model AVR
- * 00 FF FF FF FF FF FF 00 3D CB 71 0F 00 00 00 00
- * Note: This sink does not support MAT audio.
- */
- if ((_bEdidData[0x08] == 0x3D) && (_bEdidData[0x09] == 0xCB) &&
- (_bEdidData[0x0a] == 0x71) && (_bEdidData[0x0b] == 0x0F) &&
- ((pv_get_info->ui4_hdmi_dolby_truehd_ch_type & (1 << 6)) == (0x1 << 6)))
- return true;
-
+ if (!gDenyList.u1DataFromFile) {
+ /* Sony - G/H model MAT TVs have a MAT implementation issue
+ * confirmed by their SoC vendor MTK and Dolby.
+ * This leads to no audio for MAT input.
+ * Disable the MAT audio capability of these Sony TV models without
+ * impacting Sony Soundbars or AVRs of those years, using sampling rate
+ * as the differentiator.
+ * Note: This sink has no 192kHz Sampling Rates in MAT audio format.
+ */
+ if (((_bEdidData[0x08] == 0x4d) && (_bEdidData[0x09] == 0xd9)) &&
+ ((_bEdidData[0x11] == 0x1d) || (_bEdidData[0x11] == 0x1e)) &&
+ ((pv_get_info->ui4_hdmi_dolby_truehd_ch_type & (1 << 6)) == 0x0))
+ return true;
+ /* Philips - 2019/2020 model Dolby MAT TV
+ * Note: This sink has no 192kHz Sampling Rates in MAT audio format.
+ */
+ if (((_bEdidData[0x08] == 0x41) && (_bEdidData[0x09] == 0x0c)) &&
+ ((_bEdidData[0x11] == 0x1d) || (_bEdidData[0x11] == 0x1e)) &&
+ ((pv_get_info->ui4_hdmi_dolby_truehd_ch_type & (1 << 6)) == 0x0))
+ return true;
+ /* Panasonic - 2019/2020 model Dolby MAT TV
+ * Note: This sink has no 192kHz Sampling Rates in MAT audio format.
+ */
+ if (((_bEdidData[0x08] == 0x34) && (_bEdidData[0x09] == 0xa9)) &&
+ ((_bEdidData[0x11] == 0x1d) || (_bEdidData[0x11] == 0x1e)) &&
+ ((pv_get_info->ui4_hdmi_dolby_truehd_ch_type & (1 << 6)) == 0x0))
+ return true;
+ /* Hisense - 2018/2019/2020 model Dolby MAT TV
+ * Note: This sink has no 192kHz Sampling Rates in MAT audio format.
+ */
+ if (((_bEdidData[0x08] == 0x20) && (_bEdidData[0x09] == 0xa3)) &&
+ ((_bEdidData[0x11] == 0x1c) || (_bEdidData[0x11] == 0x1d) || (_bEdidData[0x11] == 0x1e)) &&
+ ((pv_get_info->ui4_hdmi_dolby_truehd_ch_type & (1 << 6)) == 0x0))
+ return true;
+ /* AMBEO - 2020 model Dolby MAT TV
+ * 00 FF FF FF FF FF FF 00 63 18 15 96 00 00 01 00
+ * Note: This sink has 192kHz Sampling Rates in MAT audio format.
+ */
+ if ((_bEdidData[0x08] == 0x63) && (_bEdidData[0x09] == 0x18) &&
+ (_bEdidData[0x0a] == 0x15) && (_bEdidData[0x0b] == 0x96) &&
+ ((pv_get_info->ui4_hdmi_dolby_truehd_ch_type & (1 << 6)) == (0x1 << 6)))
+ return true;
+ /* MRX 520 - 2015 model AVR
+ * 00 FF FF FF FF FF FF 00 06 8d 01 00 7b 03 1e 41
+ * Note: The official site states that this sink does not support MAT.
+ */
+ if ((_bEdidData[0x08] == 0x06) && (_bEdidData[0x09] == 0x8d) &&
+ (_bEdidData[0x0a] == 0x01) && (_bEdidData[0x0b] == 0x00) &&
+ ((pv_get_info->ui4_hdmi_dolby_truehd_ch_type & (1 << 6)) == (0x1 << 6)))
+ return true;
+ /* Samsung 32J590UQN with ViewHD VHD-UHAE2 audio extractor
+ * 00 ff ff ff ff ff ff 00 4c 2d 34 0f 45 32 39 30
+ * Note: This sink does not support MAT audio.
+ */
+ if ((_bEdidData[0x08] == 0x4c) && (_bEdidData[0x09] == 0x2d) &&
+ (_bEdidData[0x0a] == 0x34) && (_bEdidData[0x0b] == 0x0f) &&
+ ((pv_get_info->ui4_hdmi_dolby_truehd_ch_type & (1 << 6)) == (0x1 << 6)))
+ return true;
+ /* Onkyo NR747 - 2015 model AVR
+ * 00 FF FF FF FF FF FF 00 3D CB 71 0F 00 00 00 00
+ * Note: This sink does not support MAT audio.
+ */
+ if ((_bEdidData[0x08] == 0x3D) && (_bEdidData[0x09] == 0xCB) &&
+ (_bEdidData[0x0a] == 0x71) && (_bEdidData[0x0b] == 0x0F) &&
+ ((pv_get_info->ui4_hdmi_dolby_truehd_ch_type & (1 << 6)) == (0x1 << 6)))
+ return true;
+ } else {
+ if (isDenyListSink(DENYLIST_OP_REMOVE_DOLBY_MAT)) {
+ HDMI_AUDIO_LOG("isBadMATSink\n");
+ return true;
+ }
+ }
return false;
}
bool isBadYUV420Sink(void)
{
- /* Sony - 2018 model Samsung UN50NU6900B TV
- * 00 FF FF FF FF FF FF 00 4C 2D 14 0F 00 0E 00 01
- * Note: When open UHD, the TV not support YUV420, disable YUV420 mode.
- */
- if ((_bEdidData[0x08] == 0x4C) && (_bEdidData[0x09] == 0x2D) &&
- (_bEdidData[0x0a] == 0x14) && (_bEdidData[0x0b] == 0x0F))
- return true;
+ if (!gDenyList.u1DataFromFile) {
+ /* Samsung - 2018 model UN50NU6900B TV
+ * 00 FF FF FF FF FF FF 00 4C 2D 14 0F 00 0E 00 01
+ * Note: With UHD enabled, this TV does not support YUV420, so disable YUV420 mode.
+ */
+ if ((_bEdidData[0x08] == 0x4C) && (_bEdidData[0x09] == 0x2D) &&
+ (_bEdidData[0x0a] == 0x14) && (_bEdidData[0x0b] == 0x0F))
+ return true;
+ } else {
+ if (isDenyListSink(DENYLIST_OP_REMOVE_YUV420)) {
+ HDMI_VIDEO_LOG("isBadYUV420Sink\n");
+ return true;
+ }
+ }
return false;
}
@@ -4384,9 +4466,8 @@ void hdmi_AppGetEdidInfo(
for (i = 0; i < EDID_LENGTH_MISC; i++)
pv_get_info->ui1rawdata_edid[i] = _bEdidData[i];
- if (isThomsonUD9_India_TV()) {
- HDMI_AUDIO_LOG(
-"THOMSON UD9 43TH6000 TV (India only TV) remove Dolby audio capability\n");
+ if (isBadDolbyAudioSink()) {
+ HDMI_AUDIO_LOG("Known bad Dolby Audio Sink, remove Dolby audio capability\n");
pv_get_info->ui4_hdmi_ac3_ch_type = 0x00;
pv_get_info->ui4_hdmi_ac3_ch3ch4ch5ch7_type = 0x00;
pv_get_info->ui4_hdmi_ec3_ch_type = 0x00;
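As a worked example of the matching rules in isDenyListSink(), the illustrative entries below express the previous hardcoded Sony MAT check (manufacturer ID 4D D9, year 0x1D or 0x1E, 192 kHz TrueHD bit clear) as deny-list data; because u1YearOfManufacturer matches a single byte, the two accepted years need one entry each. This data is hypothetical, not shipped by the patch.

/* Illustrative deny-list records equivalent to the removed Sony MAT check. */
static const struct DenyList_DATA asExampleSonyMat[2] = {
	{
		.u1OpType = DENYLIST_OP_REMOVE_DOLBY_MAT,
		.valid = {
			.u1CheckIdManufacturerName   = 1,
			.u1CheckYearOfManufacturer   = 1,
			.u1CheckDolbyTrueHdChType192 = 1,
		},
		.u1IdManufacturerName   = { 0x4d, 0xd9 }, /* EDID bytes 0x08/0x09 */
		.u1YearOfManufacturer   = 0x1d,           /* EDID byte 0x11 */
		.u1DolbyTrueHdChType192 = 0,              /* bit 6 must be clear */
	},
	{
		.u1OpType = DENYLIST_OP_REMOVE_DOLBY_MAT,
		.valid = {
			.u1CheckIdManufacturerName   = 1,
			.u1CheckYearOfManufacturer   = 1,
			.u1CheckDolbyTrueHdChType192 = 1,
		},
		.u1IdManufacturerName   = { 0x4d, 0xd9 },
		.u1YearOfManufacturer   = 0x1e,
		.u1DolbyTrueHdChType192 = 0,
	},
};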
diff --git a/drivers/misc/mediatek/hdmi/hdmitx/mt8696/inc/hdmicmd.h b/drivers/misc/mediatek/hdmi/hdmitx/mt8696/inc/hdmicmd.h
index 1864e21..ca98908 100644
--- a/drivers/misc/mediatek/hdmi/hdmitx/mt8696/inc/hdmicmd.h
+++ b/drivers/misc/mediatek/hdmi/hdmitx/mt8696/inc/hdmicmd.h
@@ -12,6 +12,8 @@
#define DEBUG_SHIFT_NUM_8 8
#define DECIMAL_CONVERT 10
+#define CMD_TEST_BUF_MAX_LEN 500
+
extern struct platform_device *hdmi_pdev;
extern char debug_buffer[4095];
extern int temp_len;
diff --git a/drivers/misc/mediatek/hdmi/hdmitx/mt8696/inc/hdmiedid.h b/drivers/misc/mediatek/hdmi/hdmitx/mt8696/inc/hdmiedid.h
index 5a55538..9153e46 100644
--- a/drivers/misc/mediatek/hdmi/hdmitx/mt8696/inc/hdmiedid.h
+++ b/drivers/misc/mediatek/hdmi/hdmitx/mt8696/inc/hdmiedid.h
@@ -309,6 +309,40 @@ struct HDMI_SINK_AV_CAP_T {
extern unsigned char drm_bEdidData[256];
+/* Deny List Declaration */
+#define DENYLIST_FILE_MAX_LEN 0x400 /* 0x20 records of sizeof(struct DenyList_DATA) (0x20 bytes) */
+
+enum DenyList_Operation_T {
+ DENYLIST_OP_REMOVE_DOLBY_AUDIO = 0,
+ DENYLIST_OP_REMOVE_DOLBY_DIGITAL_PLUS,
+ DENYLIST_OP_REMOVE_DOLBY_MAT,
+ DENYLIST_OP_REMOVE_YUV420,
+};
+
+struct DenyList_DATA {
+ unsigned char u1OpType; /* enum DenyList_Operation_T */
+ struct type2check {
+ unsigned char u1CheckIdManufacturerName : 1;
+ unsigned char u1CheckIdProductCode : 1;
+ unsigned char u1CheckYearOfManufacturer : 1;
+ unsigned char u1CheckDolbyTrueHdChType192 : 1;
+ unsigned char u1CheckReserve : 4;
+ } valid;
+ unsigned char u1IdManufacturerName[2];
+ unsigned char u1IdProductCode[2];
+ unsigned char u1YearOfManufacturer;
+ unsigned char u1DolbyTrueHdChType192;
+ unsigned char u1Reserve[24];
+};
+
+struct DenyList {
+ bool u1DataFromFile;
+ /* At most DENYLIST_FILE_MAX_LEN bytes of records are ever loaded */
+ struct DenyList_DATA DL_DATA[DENYLIST_FILE_MAX_LEN / sizeof(struct DenyList_DATA)];
+ unsigned char u1NumSize;
+};
+
+extern struct DenyList gDenyList;
+
extern void hdmi_checkedid(unsigned char i1noedid);
extern struct QMS_DEBUG_T qms_debug;
extern unsigned char hdmi_fgreadedid(unsigned char i1noedid);
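For producing the denylist.bin consumed above, a hedged user-space sketch follows: records are written back-to-back as 32-byte structures, since the loader derives u1NumSize as file size divided by sizeof(struct DenyList_DATA) and rejects a file whose first 32 bytes are all 0xFF. It assumes struct DenyList_DATA has been copied verbatim from hdmiedid.h into the tool; because every member is an unsigned char, the layout is padding-free.

/* Sketch: emit deny-list records for placement in the firmware path. */
#include <stdio.h>

/* struct DenyList_DATA copied from hdmiedid.h (assumption) */

_Static_assert(sizeof(struct DenyList_DATA) == 0x20,
               "deny-list records must be 32 bytes");

int write_denylist(const char *path,
                   const struct DenyList_DATA *entries, size_t count)
{
	FILE *f = fopen(path, "wb");
	size_t written;

	if (f == NULL)
		return -1;
	/* Records are stored verbatim; the kernel loader caps the file at
	 * DENYLIST_FILE_MAX_LEN (0x400) bytes, i.e. 32 records. */
	written = fwrite(entries, sizeof(struct DenyList_DATA), count, f);
	fclose(f);
	return (written == count) ? 0 : -1;
}

For instance, write_denylist("denylist.bin", asExampleSonyMat, 2) would produce a file that the denylist: debug command above can then load with enable=1.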