break;
case gcvHAL_EVENT_COMMIT:
- gcmkONERROR(gckOS_AcquireMutex(Kernel->os,
- Kernel->device->commitMutex,
- gcvINFINITE
- ));
+ if (!Interface->commitMutex)
+ {
+ gcmkONERROR(gckOS_AcquireMutex(Kernel->os,
+ Kernel->device->commitMutex,
+ gcvINFINITE
+ ));
- commitMutexAcquired = gcvTRUE;
+ commitMutexAcquired = gcvTRUE;
+ }
/* Commit an event queue. */
if (Interface->engine == gcvENGINE_BLT)
{
Kernel->eventObj, gcmUINT64_TO_PTR(Interface->u.Event.queue), gcvFALSE));
}
- gcmkONERROR(gckOS_ReleaseMutex(Kernel->os, Kernel->device->commitMutex));
- commitMutexAcquired = gcvFALSE;
+ if (!Interface->commitMutex)
+ {
+ gcmkONERROR(gckOS_ReleaseMutex(Kernel->os, Kernel->device->commitMutex));
+ commitMutexAcquired = gcvFALSE;
+ }
break;
case gcvHAL_COMMIT:
- gcmkONERROR(gckOS_AcquireMutex(Kernel->os,
- Kernel->device->commitMutex,
- gcvINFINITE
- ));
- commitMutexAcquired = gcvTRUE;
+ if (!Interface->commitMutex)
+ {
+ gcmkONERROR(gckOS_AcquireMutex(Kernel->os,
+ Kernel->device->commitMutex,
+ gcvINFINITE
+ ));
+ commitMutexAcquired = gcvTRUE;
+ }
/* Commit a command and context buffer. */
if (Interface->engine == gcvENGINE_BLT)
}
}
}
- gcmkONERROR(gckOS_ReleaseMutex(Kernel->os, Kernel->device->commitMutex));
- commitMutexAcquired = gcvFALSE;
+ if (!Interface->commitMutex)
+ {
+ gcmkONERROR(gckOS_ReleaseMutex(Kernel->os, Kernel->device->commitMutex));
+ commitMutexAcquired = gcvFALSE;
+ }
break;
case gcvHAL_STALL:
));
break;
+ case gcvHAL_DEVICE_MUTEX:
+ if (Interface->u.DeviceMutex.isMutexLocked)
+ {
+ gcmkONERROR(gckOS_AcquireMutex(Kernel->os,
+ Kernel->device->commitMutex,
+ gcvINFINITE
+ ));
+ }
+ else
+ {
+ gcmkONERROR(gckOS_ReleaseMutex(Kernel->os, Kernel->device->commitMutex));
+ }
+ break;
+
#if gcdDEC_ENABLE_AHB
case gcvHAL_DEC300_READ:
gcmkONERROR(viv_dec300_read(
/* Wait until GPU finishes access to a resource. */
gcvHAL_WAIT_FENCE,
+ /* Mutex Operation. */
+ gcvHAL_DEVICE_MUTEX,
+
#if gcdDEC_ENABLE_AHB
gcvHAL_DEC300_READ,
gcvHAL_DEC300_WRITE,
/* Ignore information from TLS when doing IO control */
gctBOOL ignoreTLS;
+    /* The mutex is already acquired. */
+ IN gctBOOL commitMutex;
+
/* Union of command structures. */
union _u
{
}
BottomHalfUnlockVideoMemory;
+ /* gcvHAL_DEVICE_MUTEX: */
+ struct _gcsHAL_DEVICE_MUTEX
+ {
+ /* Lock or Release device mutex. */
+ gctBOOL isMutexLocked;
+ }
+ DeviceMutex;
+
gcsHAL_QUERY_CHIP_OPTIONS QueryChipOptions;
}
u;
data->device = galDevice;
data->pidOpen = _GetProcessID();
+ data->isLocked = gcvFALSE;
/* Attached the process. */
for (i = 0; i < gcdMAX_GPU_COUNT; i++)
gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
}
+ if (data->isLocked)
+ {
+ /* Release the mutex. */
+ gcmkONERROR(gckOS_ReleaseMutex(gcvNULL, device->device->commitMutex));
+ data->isLocked = gcvFALSE;
+ }
+
/* A process gets detached. */
for (i = 0; i < gcdMAX_GPU_COUNT; i++)
{
gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
}
+ if (iface.command == gcvHAL_DEVICE_MUTEX)
+ {
+ if (iface.u.DeviceMutex.isMutexLocked == gcvTRUE)
+ {
+ data->isLocked = gcvTRUE;
+ }
+ else
+ {
+ data->isLocked = gcvFALSE;
+ }
+ }
+
status = gckDEVICE_Dispatch(device->device, &iface);
/* Redo system call after pending signal is handled. */