Diffstat (limited to 'src/osd/modules/sync')
-rw-r--r--   src/osd/modules/sync/osdsync.h        |  16
-rw-r--r--   src/osd/modules/sync/sync_mini.cpp    |  57
-rw-r--r--   src/osd/modules/sync/sync_ntc.cpp     | 279
-rw-r--r--   src/osd/modules/sync/sync_os2.cpp     | 280
-rw-r--r--   src/osd/modules/sync/sync_sdl.cpp     | 136
-rw-r--r--   src/osd/modules/sync/sync_tc.cpp      | 117
-rw-r--r--   src/osd/modules/sync/sync_windows.cpp | 177
-rw-r--r--   src/osd/modules/sync/work_osd.cpp     |  50
8 files changed, 1084 insertions, 28 deletions
diff --git a/src/osd/modules/sync/osdsync.h b/src/osd/modules/sync/osdsync.h
index e6e5e2f99c9..46d32b09135 100644
--- a/src/osd/modules/sync/osdsync.h
+++ b/src/osd/modules/sync/osdsync.h
@@ -24,7 +24,7 @@ struct osd_event;
 
 /*-----------------------------------------------------------------------------
-    osd_event_alloc: allocate a new event
+    osd_lock_event_alloc: allocate a new event
 
     Parameters:
 
@@ -174,4 +174,18 @@ int osd_thread_cpu_affinity(osd_thread *thread, UINT32 mask);
 -----------------------------------------------------------------------------*/
 void osd_thread_wait_free(osd_thread *thread);
 
+//============================================================
+//  Scalable Locks
+//============================================================
+
+struct osd_scalable_lock;
+
+osd_scalable_lock *osd_scalable_lock_alloc(void);
+
+INT32 osd_scalable_lock_acquire(osd_scalable_lock *lock);
+
+void osd_scalable_lock_release(osd_scalable_lock *lock, INT32 myslot);
+
+void osd_scalable_lock_free(osd_scalable_lock *lock);
+
 #endif  /* __OSDSYNC__ */
diff --git a/src/osd/modules/sync/sync_mini.cpp b/src/osd/modules/sync/sync_mini.cpp
index 5829eb6d63c..f5e6accb0c7 100644
--- a/src/osd/modules/sync/sync_mini.cpp
+++ b/src/osd/modules/sync/sync_mini.cpp
@@ -20,6 +20,63 @@ struct _osd_thread {
 
 
 //============================================================
+//  osd_lock_alloc
+//============================================================
+
+osd_lock *osd_lock_alloc(void)
+{
+    // the minimal implementation does not support threading
+    // just return a dummy value here
+    return (osd_lock *)1;
+}
+
+
+//============================================================
+//  osd_lock_acquire
+//============================================================
+
+void osd_lock_acquire(osd_lock *lock)
+{
+    // the minimal implementation does not support threading
+    // the acquire always "succeeds"
+}
+
+
+//============================================================
+//  osd_lock_try
+//============================================================
+
+int osd_lock_try(osd_lock *lock)
+{
+    // the minimal implementation does not support threading
+    // the acquire always "succeeds"
+    return TRUE;
+}
+
+
+//============================================================
+//  osd_lock_release
+//============================================================
+
+void osd_lock_release(osd_lock *lock)
+{
+    // the minimal implementation does not support threading
+    // do nothing here
+}
+
+
+//============================================================
+//  osd_lock_free
+//============================================================
+
+void osd_lock_free(osd_lock *lock)
+{
+    // the minimal implementation does not support threading
+    // do nothing here
+}
+
+
+//============================================================
 //  osd_event_alloc
 //============================================================
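The header exposes the scalable lock as an opaque type plus a four-call API. Note that osd_scalable_lock_acquire() returns a slot index which the caller must hand back to osd_scalable_lock_release(); releasing passes the lock on to the next slot rather than simply clearing a flag. A minimal usage sketch, not part of the patch, assuming the osdsync.h declarations above (INT32 comes from MAME's OSD headers; `counter` is a hypothetical shared variable):

    static osd_scalable_lock *queue_lock;   // created once via osd_scalable_lock_alloc()
    static int counter;                     // hypothetical shared state

    static void touch_shared_state(void)
    {
        INT32 myslot = osd_scalable_lock_acquire(queue_lock);
        counter++;                          // critical section
        osd_scalable_lock_release(queue_lock, myslot);
    }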
diff --git a/src/osd/modules/sync/sync_ntc.cpp b/src/osd/modules/sync/sync_ntc.cpp
index c78f67ce64a..462c56ee434 100644
--- a/src/osd/modules/sync/sync_ntc.cpp
+++ b/src/osd/modules/sync/sync_ntc.cpp
@@ -38,6 +38,16 @@
 #include <errno.h>
 #include <sys/time.h>
 
+struct osd_lock {
+    volatile pthread_t  holder;
+    INT32               count;
+#ifdef PTR64
+    INT8                padding[52];    // Fill a 64-byte cache line
+#else
+    INT8                padding[56];    // A bit more padding
+#endif
+};
+
 struct osd_event {
     pthread_mutex_t     mutex;
     pthread_cond_t      cond;
@@ -59,6 +69,275 @@ struct osd_thread {
     pthread_t           thread;
 };
 
+struct osd_scalable_lock
+{
+    struct
+    {
+        volatile INT32  haslock;        // do we have the lock?
+        INT32           filler[64/4-1]; // assumes a 64-byte cache line
+    } slot[WORK_MAX_THREADS];           // one slot per thread
+    volatile INT32      nextindex;      // index of next slot to use
+};
+
+
+//============================================================
+//  Scalable Locks
+//============================================================
+
+osd_scalable_lock *osd_scalable_lock_alloc(void)
+{
+    osd_scalable_lock *lock;
+
+    lock = (osd_scalable_lock *)calloc(1, sizeof(*lock));
+    if (lock == NULL)
+        return NULL;
+
+    memset(lock, 0, sizeof(*lock));
+    lock->slot[0].haslock = TRUE;
+    return lock;
+}
+
+
+INT32 osd_scalable_lock_acquire(osd_scalable_lock *lock)
+{
+    INT32 myslot = (atomic_increment32(&lock->nextindex) - 1) & (WORK_MAX_THREADS - 1);
+
+#if defined(__i386__) || defined(__x86_64__)
+    register INT32 tmp;
+    __asm__ __volatile__ (
+        "1: clr    %[tmp]             ;"
+        "   xchg   %[haslock], %[tmp] ;"
+        "   test   %[tmp], %[tmp]     ;"
+        "   jne    3f                 ;"
+        "2: mov    %[haslock], %[tmp] ;"
+        "   test   %[tmp], %[tmp]     ;"
+        "   jne    1b                 ;"
+        "   pause                     ;"
+        "   jmp    2b                 ;"
+        "3: "
+        : [haslock] "+m"  (lock->slot[myslot].haslock)
+        , [tmp]     "=&r" (tmp)
+        :
+        : "cc"
+    );
+#elif defined(__ppc__) || defined (__PPC__) || defined(__ppc64__) || defined(__PPC64__)
+    register INT32 tmp;
+    __asm__ __volatile__ (
+        "1: lwarx   %[tmp], 0, %[haslock] \n"
+        "   cmpwi   %[tmp], 0             \n"
+        "   bne     3f                    \n"
+        "2: lwzx    %[tmp], 0, %[haslock] \n"
+        "   cmpwi   %[tmp], 0             \n"
+        "   bne     1b                    \n"
+        "   nop                           \n"
+        "   nop                           \n"
+        "   b       2b                    \n"
+        "3: li      %[tmp], 0             \n"
+        "   stwcx.  %[tmp], 0, %[haslock] \n"
+        "   bne-    1b                    \n"
+        "   lwsync                        \n"
+        : [tmp]     "=&r" (tmp)
+        : [haslock] "r"   (&lock->slot[myslot].haslock)
+        : "cr0"
+    );
+#else
+    INT32 backoff = 1;
+    while (!osd_compare_exchange32(&lock->slot[myslot].haslock, TRUE, FALSE))
+    {
+        INT32 backcount;
+        for (backcount = 0; backcount < backoff; backcount++)
+            osd_yield_processor();
+        backoff <<= 1;
+    }
+#endif
+    return myslot;
+}
+
+
+void osd_scalable_lock_release(osd_scalable_lock *lock, INT32 myslot)
+{
+#if defined(__i386__) || defined(__x86_64__)
+    register INT32 tmp = TRUE;
+    __asm__ __volatile__ (
+        " xchg   %[haslock], %[tmp] ;"
+        : [haslock] "+m" (lock->slot[(myslot + 1) & (WORK_MAX_THREADS - 1)].haslock)
+        , [tmp]     "+r" (tmp)
+        :
+    );
+#elif defined(__ppc__) || defined (__PPC__) || defined(__ppc64__) || defined(__PPC64__)
+    lock->slot[(myslot + 1) & (WORK_MAX_THREADS - 1)].haslock = TRUE;
+    __asm__ __volatile__ ( " lwsync " : : );
+#else
+    osd_exchange32(&lock->slot[(myslot + 1) & (WORK_MAX_THREADS - 1)].haslock, TRUE);
+#endif
+}
+
+void osd_scalable_lock_free(osd_scalable_lock *lock)
+{
+    free(lock);
+}
+
+static inline pthread_t osd_compare_exchange_pthread_t(pthread_t volatile *ptr, pthread_t compare, pthread_t exchange)
+{
+#ifdef PTR64
+    INT64 result = compare_exchange64((INT64 volatile *)ptr, (INT64)compare, (INT64)exchange);
+#else
+    INT32 result = compare_exchange32((INT32 volatile *)ptr, (INT32)compare, (INT32)exchange);
+#endif
+    return (pthread_t)result;
+}
+
+static inline pthread_t osd_exchange_pthread_t(pthread_t volatile *ptr, pthread_t exchange)
+{
+#ifdef PTR64
+    INT64 result = osd_exchange64((INT64 volatile *)ptr, (INT64)exchange);
+#else
+    INT32 result = atomic_exchange32((INT32 volatile *)ptr, (INT32)exchange);
+#endif
+    return (pthread_t)result;
+}
+
+
+//============================================================
+//  osd_lock_alloc
+//============================================================
+
+osd_lock *osd_lock_alloc(void)
+{
+    osd_lock *lock;
+
+    lock = (osd_lock *)calloc(1, sizeof(osd_lock));
+    if (lock == NULL)
+        return NULL;
+
+    lock->holder = 0;
+    lock->count = 0;
+
+    return lock;
+}
+
+//============================================================
+//  osd_lock_acquire
+//============================================================
+
+void osd_lock_acquire(osd_lock *lock)
+{
+    pthread_t current, prev;
+
+    current = pthread_self();
+    prev = osd_compare_exchange_pthread_t(&lock->holder, 0, current);
+    if (prev != nullptr && prev != current)
+    {
+        do {
+            register INT32 spin = 10000;    // Convenient spin count
+            register pthread_t tmp;
+#if defined(__i386__) || defined(__x86_64__)
+            __asm__ __volatile__ (
+                "1: pause                    ;"
+                "   mov    %[holder], %[tmp] ;"
+                "   test   %[tmp], %[tmp]    ;"
+                "   loopne 1b                ;"
+                : [spin]   "+c"  (spin)
+                , [tmp]    "=&r" (tmp)
+                : [holder] "m"   (lock->holder)
+                : "cc"
+            );
+#elif defined(__ppc__) || defined(__PPC__)
+            __asm__ __volatile__ (
+                "1: nop                        \n"
+                "   nop                        \n"
+                "   lwzx  %[tmp], 0, %[holder] \n"
+                "   cmpwi %[tmp], 0            \n"
+                "   bdnzt eq, 1b               \n"
+                : [spin]   "+c"  (spin)
+                , [tmp]    "=&r" (tmp)
+                : [holder] "r"   (&lock->holder)
+                : "cr0"
+            );
+#elif defined(__ppc64__) || defined(__PPC64__)
+            __asm__ __volatile__ (
+                "1: nop                        \n"
+                "   nop                        \n"
+                "   ldx   %[tmp], 0, %[holder] \n"
+                "   cmpdi %[tmp], 0            \n"
+                "   bdnzt eq, 1b               \n"
+                : [spin]   "+c"  (spin)
+                , [tmp]    "=&r" (tmp)
+                : [holder] "r"   (&lock->holder)
+                : "cr0"
+            );
+#else
+            while (--spin > 0 && lock->holder != NULL)
+                osd_yield_processor();
+#endif
+#if 0
+            /* If you mean to use locks as a blocking mechanism for extended
+             * periods of time, you should do something like this.  However,
+             * it kills the performance of gaelco3d.
+             */
+            if (spin == 0)
+            {
+                struct timespec sleep = { 0, 100000 }, remaining;
+                nanosleep(&sleep, &remaining); // sleep for 100us
+            }
+#endif
+        } while (osd_compare_exchange_pthread_t(&lock->holder, 0, current) != nullptr);
+    }
+    lock->count++;
+}
+
+//============================================================
+//  osd_lock_try
+//============================================================
+
+int osd_lock_try(osd_lock *lock)
+{
+    pthread_t current, prev;
+
+    current = pthread_self();
+    prev = osd_compare_exchange_pthread_t(&lock->holder, 0, current);
+    if (prev == nullptr || prev == current)
+    {
+        lock->count++;
+        return 1;
+    }
+    return 0;
+}
+
+//============================================================
+//  osd_lock_release
+//============================================================
+
+void osd_lock_release(osd_lock *lock)
+{
+    pthread_t current;
+
+    current = pthread_self();
+    if (lock->holder == current)
+    {
+        if (--lock->count == 0)
+#if defined(__ppc__) || defined(__PPC__) || defined(__ppc64__) || defined(__PPC64__)
+            lock->holder = 0;
+        __asm__ __volatile__( " lwsync " : : );
+#else
+            osd_exchange_pthread_t(&lock->holder, 0);
+#endif
+        return;
+    }
+
+    // trying to release a lock you don't hold is bad!
+//  assert(lock->holder == pthread_self());
+}
+
+//============================================================
+//  osd_lock_free
+//============================================================
+
+void osd_lock_free(osd_lock *lock)
+{
+    free(lock);
+}
+
 //============================================================
 //  osd_event_alloc
 //============================================================
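On x86 and PPC the acquire path above is hand-written assembly; every other target falls back to osd_compare_exchange32() with exponential backoff. The same fallback rewritten as a self-contained C++11 sketch, not part of the patch (hypothetical names; std::atomic stands in for the OSD atomic primitives):

    #include <atomic>
    #include <thread>

    static const int MAX_THREADS = 16;      // stand-in for WORK_MAX_THREADS (power of two)

    struct scalable_lock
    {
        struct padded_slot
        {
            std::atomic<int> haslock{0};    // 1 when this slot holds the lock
            char             pad[60];       // keep each slot on its own cache line
        };
        padded_slot      slot[MAX_THREADS];
        std::atomic<int> nextindex{0};

        scalable_lock() { slot[0].haslock = 1; }    // the first acquirer is handed the lock
    };

    int scalable_acquire(scalable_lock &l)
    {
        int myslot = l.nextindex.fetch_add(1) & (MAX_THREADS - 1);
        int backoff = 1;
        int expected = 1;
        // spin, with exponential backoff, until the lock is handed to our slot
        while (!l.slot[myslot].haslock.compare_exchange_weak(expected, 0))
        {
            expected = 1;
            for (int i = 0; i < backoff; i++)
                std::this_thread::yield();
            backoff <<= 1;
        }
        return myslot;
    }

    void scalable_release(scalable_lock &l, int myslot)
    {
        // hand the lock to the next slot in ticket order
        l.slot[(myslot + 1) & (MAX_THREADS - 1)].haslock.store(1);
    }

As in the patch, correctness depends on the slot count being a power of two and on no more than that many threads contending at once; each waiter spins on its own cache line, which is what makes the design scale.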
diff --git a/src/osd/modules/sync/sync_os2.cpp b/src/osd/modules/sync/sync_os2.cpp
index 6e3265bb198..dba34065610 100644
--- a/src/osd/modules/sync/sync_os2.cpp
+++ b/src/osd/modules/sync/sync_os2.cpp
@@ -31,6 +31,16 @@
 #define pthread_t       int
 #define pthread_self    _gettid
 
+struct osd_lock {
+    volatile pthread_t  holder;
+    INT32               count;
+#ifdef PTR64
+    INT8                padding[52];    // Fill a 64-byte cache line
+#else
+    INT8                padding[56];    // A bit more padding
+#endif
+};
+
 struct osd_event {
     HMTX                hmtx;
     HEV                 hev;
@@ -48,6 +58,276 @@ struct osd_thread {
     void *              param;
 };
 
+struct osd_scalable_lock
+{
+    struct
+    {
+        volatile INT32  haslock;        // do we have the lock?
+        INT32           filler[64/4-1]; // assumes a 64-byte cache line
+    } slot[WORK_MAX_THREADS];           // one slot per thread
+    volatile INT32      nextindex;      // index of next slot to use
+};
+
+
+//============================================================
+//  Scalable Locks
+//============================================================
+
+osd_scalable_lock *osd_scalable_lock_alloc(void)
+{
+    osd_scalable_lock *lock;
+
+    lock = (osd_scalable_lock *)calloc(1, sizeof(*lock));
+    if (lock == NULL)
+        return NULL;
+
+    memset(lock, 0, sizeof(*lock));
+    lock->slot[0].haslock = TRUE;
+    return lock;
+}
+
+
+INT32 osd_scalable_lock_acquire(osd_scalable_lock *lock)
+{
+    INT32 myslot = (atomic_increment32(&lock->nextindex) - 1) & (WORK_MAX_THREADS - 1);
+
+#if defined(__i386__) || defined(__x86_64__)
+    register INT32 tmp;
+    __asm__ __volatile__ (
+        "1: clr    %[tmp]             ;"
+        "   xchg   %[haslock], %[tmp] ;"
+        "   test   %[tmp], %[tmp]     ;"
+        "   jne    3f                 ;"
+        "2: mov    %[haslock], %[tmp] ;"
+        "   test   %[tmp], %[tmp]     ;"
+        "   jne    1b                 ;"
+        "   pause                     ;"
+        "   jmp    2b                 ;"
+        "3: "
+        : [haslock] "+m"  (lock->slot[myslot].haslock)
+        , [tmp]     "=&r" (tmp)
+        :
+        : "%cc"
+    );
+#elif defined(__ppc__) || defined (__PPC__) || defined(__ppc64__) || defined(__PPC64__)
+    register INT32 tmp;
+    __asm__ __volatile__ (
+        "1: lwarx   %[tmp], 0, %[haslock] \n"
+        "   cmpwi   %[tmp], 0             \n"
+        "   bne     3f                    \n"
+        "2: lwzx    %[tmp], 0, %[haslock] \n"
+        "   cmpwi   %[tmp], 0             \n"
+        "   bne     1b                    \n"
+        "   nop                           \n"
+        "   nop                           \n"
+        "   b       2b                    \n"
+        "3: li      %[tmp], 0             \n"
+        "   sync                          \n"
+        "   stwcx.  %[tmp], 0, %[haslock] \n"
+        "   bne-    1b                    \n"
+        "   eieio                         \n"
+        : [tmp]     "=&r" (tmp)
+        : [haslock] "r"   (&lock->slot[myslot].haslock)
+        : "cr0"
+    );
+#else
+    INT32 backoff = 1;
+    while (!osd_compare_exchange32(&lock->slot[myslot].haslock, TRUE, FALSE))
+    {
+        INT32 backcount;
+        for (backcount = 0; backcount < backoff; backcount++)
+            osd_yield_processor();
+        backoff <<= 1;
+    }
+#endif
+    return myslot;
+}
+
+
+void osd_scalable_lock_release(osd_scalable_lock *lock, INT32 myslot)
+{
+#if defined(__i386__) || defined(__x86_64__)
+    register INT32 tmp = TRUE;
+    __asm__ __volatile__ (
+        " xchg   %[haslock], %[tmp] ;"
+        : [haslock] "+m" (lock->slot[(myslot + 1) & (WORK_MAX_THREADS - 1)].haslock)
+        , [tmp]     "+r" (tmp)
+        :
+    );
+#elif defined(__ppc__) || defined (__PPC__) || defined(__ppc64__) || defined(__PPC64__)
+    lock->slot[(myslot + 1) & (WORK_MAX_THREADS - 1)].haslock = TRUE;
+    __asm__ __volatile__ ( " eieio " : : );
+#else
+    osd_exchange32(&lock->slot[(myslot + 1) & (WORK_MAX_THREADS - 1)].haslock, TRUE);
+#endif
+}
+
+void osd_scalable_lock_free(osd_scalable_lock *lock)
+{
+    free(lock);
+}
+
+static inline pthread_t osd_compare_exchange_pthread_t(pthread_t volatile *ptr, pthread_t compare, pthread_t exchange)
+{
+#ifdef PTR64
+    INT64 result = compare_exchange64((INT64 volatile *)ptr, (INT64)compare, (INT64)exchange);
+#else
+    INT32 result = compare_exchange32((INT32 volatile *)ptr, (INT32)compare, (INT32)exchange);
+#endif
+    return (pthread_t)result;
+}
+
+static inline pthread_t osd_exchange_pthread_t(pthread_t volatile *ptr, pthread_t exchange)
+{
+#ifdef PTR64
+    INT64 result = osd_exchange64((INT64 volatile *)ptr, (INT64)exchange);
+#else
+    INT32 result = atomic_exchange32((INT32 volatile *)ptr, (INT32)exchange);
+#endif
+    return (pthread_t)result;
+}
+
+
+//============================================================
+//  osd_lock_alloc
+//============================================================
+
+osd_lock *osd_lock_alloc(void)
+{
+    osd_lock *lock;
+
+    lock = (osd_lock *)calloc(1, sizeof(osd_lock));
+    if (lock == NULL)
+        return NULL;
+
+    lock->holder = 0;
+    lock->count = 0;
+
+    return lock;
+}
+
+//============================================================
+//  osd_lock_acquire
+//============================================================
+
+void osd_lock_acquire(osd_lock *lock)
+{
+    pthread_t current, prev;
+
+    current = pthread_self();
+    prev = osd_compare_exchange_pthread_t(&lock->holder, 0, current);
+    if (prev != (size_t)NULL && prev != current)
+    {
+        do {
+            register INT32 spin = 10000;    // Convenient spin count
+            register pthread_t tmp;
+#if defined(__i386__) || defined(__x86_64__)
+            __asm__ __volatile__ (
+                "1: pause                    ;"
+                "   mov    %[holder], %[tmp] ;"
+                "   test   %[tmp], %[tmp]    ;"
+                "   loopne 1b                ;"
+                : [spin]   "+c"  (spin)
+                , [tmp]    "=&r" (tmp)
+                : [holder] "m"   (lock->holder)
+                : "%cc"
+            );
+#elif defined(__ppc__) || defined(__PPC__)
+            __asm__ __volatile__ (
+                "1: nop                        \n"
+                "   nop                        \n"
+                "   lwzx  %[tmp], 0, %[holder] \n"
+                "   cmpwi %[tmp], 0            \n"
+                "   bdnzt eq, 1b               \n"
+                : [spin]   "+c"  (spin)
+                , [tmp]    "=&r" (tmp)
+                : [holder] "r"   (&lock->holder)
+                : "cr0"
+            );
+#elif defined(__ppc64__) || defined(__PPC64__)
+            __asm__ __volatile__ (
+                "1: nop                        \n"
+                "   nop                        \n"
+                "   ldx   %[tmp], 0, %[holder] \n"
+                "   cmpdi %[tmp], 0            \n"
+                "   bdnzt eq, 1b               \n"
+                : [spin]   "+c"  (spin)
+                , [tmp]    "=&r" (tmp)
+                : [holder] "r"   (&lock->holder)
+                : "cr0"
+            );
+#else
+            while (--spin > 0 && lock->holder != NULL)
+                osd_yield_processor();
+#endif
+#if 0
+            /* If you mean to use locks as a blocking mechanism for extended
+             * periods of time, you should do something like this.  However,
+             * it kills the performance of gaelco3d.
+             */
+            if (spin == 0)
+            {
+                struct timespec sleep = { 0, 100000 }, remaining;
+                nanosleep(&sleep, &remaining); // sleep for 100us
+            }
+#endif
+        } while (osd_compare_exchange_pthread_t(&lock->holder, 0, current) != (size_t)NULL);
+    }
+    lock->count++;
+}
+
+//============================================================
+//  osd_lock_try
+//============================================================
+
+int osd_lock_try(osd_lock *lock)
+{
+    pthread_t current, prev;
+
+    current = pthread_self();
+    prev = osd_compare_exchange_pthread_t(&lock->holder, 0, current);
+    if (prev == (size_t)NULL || prev == current)
+    {
+        lock->count++;
+        return 1;
+    }
+    return 0;
+}
+
+//============================================================
+//  osd_lock_release
+//============================================================
+
+void osd_lock_release(osd_lock *lock)
+{
+    pthread_t current;
+
+    current = pthread_self();
+    if (lock->holder == current)
+    {
+        if (--lock->count == 0)
+#if defined(__ppc__) || defined(__PPC__) || defined(__ppc64__) || defined(__PPC64__)
+            lock->holder = 0;
+        __asm__ __volatile__( " eieio " : : );
+#else
+            osd_exchange_pthread_t(&lock->holder, 0);
+#endif
+        return;
+    }
+
+    // trying to release a lock you don't hold is bad!
+//  assert(lock->holder == pthread_self());
+}
+
+//============================================================
+//  osd_lock_free
+//============================================================
+
+void osd_lock_free(osd_lock *lock)
+{
+    free(lock);
+}
+
 //============================================================
 //  osd_event_alloc
 //============================================================
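sync_os2.cpp is a near copy of sync_ntc.cpp (the PPC paths use sync/eieio instead of lwsync, and the asm declares a "%cc" clobber); both fold the pthread_t compare-exchange onto a 32- or 64-bit primitive via the PTR64 #ifdef. For comparison only, std::atomic deduces that width by itself; a sketch, not part of the patch, assuming pthread_t is a scalar handle as it is on these targets:

    #include <atomic>
    #include <pthread.h>

    static std::atomic<pthread_t> holder{(pthread_t)0};
    static int count;   // recursion depth, touched only by the owner

    bool lock_try(void)
    {
        pthread_t expected = (pthread_t)0;
        pthread_t self     = pthread_self();
        // take the lock if free, or recurse if we already own it
        if (holder.compare_exchange_strong(expected, self) || expected == self)
        {
            ++count;
            return true;
        }
        return false;
    }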
diff --git a/src/osd/modules/sync/sync_sdl.cpp b/src/osd/modules/sync/sync_sdl.cpp
index 73b735fefb3..90969b2a068 100644
--- a/src/osd/modules/sync/sync_sdl.cpp
+++ b/src/osd/modules/sync/sync_sdl.cpp
@@ -51,6 +51,138 @@ struct osd_thread {
     void *              param;
 };
 
+struct osd_scalable_lock
+{
+    SDL_mutex *         mutex;
+};
+
+//============================================================
+//  Scalable Locks
+//============================================================
+
+osd_scalable_lock *osd_scalable_lock_alloc(void)
+{
+    osd_scalable_lock *lock;
+
+    lock = (osd_scalable_lock *)calloc(1, sizeof(*lock));
+    if (lock == NULL)
+        return NULL;
+
+    lock->mutex = SDL_CreateMutex();
+    return lock;
+}
+
+
+INT32 osd_scalable_lock_acquire(osd_scalable_lock *lock)
+{
+    SDL_mutexP(lock->mutex);
+    return 0;
+}
+
+
+void osd_scalable_lock_release(osd_scalable_lock *lock, INT32 myslot)
+{
+    SDL_mutexV(lock->mutex);
+}
+
+void osd_scalable_lock_free(osd_scalable_lock *lock)
+{
+    SDL_DestroyMutex(lock->mutex);
+    free(lock);
+}
+
+//============================================================
+//  osd_lock_alloc
+//============================================================
+
+osd_lock *osd_lock_alloc(void)
+{
+    hidden_mutex_t *mutex;
+
+    mutex = (hidden_mutex_t *)calloc(1, sizeof(hidden_mutex_t));
+    if (mutex == NULL)
+        return NULL;
+
+    mutex->id = SDL_CreateMutex();
+
+    return (osd_lock *)mutex;
+}
+
+//============================================================
+//  osd_lock_acquire
+//============================================================
+
+void osd_lock_acquire(osd_lock *lock)
+{
+    hidden_mutex_t *mutex = (hidden_mutex_t *) lock;
+
+    LOG(("osd_lock_acquire"));
+    /* get the lock */
+    mutex->locked++;    /* signal that we are *about* to lock - prevent osd_lock_try */
+    SDL_mutexP(mutex->id);
+    mutex->threadid = SDL_ThreadID();
+}
+
+//============================================================
+//  osd_lock_try
+//============================================================
+
+int osd_lock_try(osd_lock *lock)
+{
+    hidden_mutex_t *mutex = (hidden_mutex_t *) lock;
+
+    LOG(("osd_lock_try"));
+    if (mutex->locked && mutex->threadid == SDL_ThreadID())
+    {
+        /* get the lock */
+        SDL_mutexP(mutex->id);
+        mutex->locked++;
+        mutex->threadid = SDL_ThreadID();
+        return 1;
+    }
+    else if ((mutex->locked == 0))
+    {
+        /* get the lock */
+        mutex->locked++;
+        SDL_mutexP(mutex->id);
+        mutex->threadid = SDL_ThreadID();
+        return 1;
+    }
+    else
+    {
+        /* fail */
+        return 0;
+    }
+}
+
+//============================================================
+//  osd_lock_release
+//============================================================
+
+void osd_lock_release(osd_lock *lock)
+{
+    hidden_mutex_t *mutex = (hidden_mutex_t *) lock;
+
+    LOG(("osd_lock_release"));
+    mutex->locked--;
+    if (mutex->locked == 0)
+        mutex->threadid = -1;
+    SDL_mutexV(mutex->id);
+}
+
+//============================================================
+//  osd_lock_free
+//============================================================
+
+void osd_lock_free(osd_lock *lock)
+{
+    hidden_mutex_t *mutex = (hidden_mutex_t *) lock;
+
+    LOG(("osd_lock_free"));
+    //osd_lock_release(lock);
+    SDL_DestroyMutex(mutex->id);
+    free(mutex);
+}
 
 //============================================================
 //  osd_event_alloc
@@ -191,7 +323,11 @@ osd_thread *osd_thread_create(osd_thread_callback callback, void *cbparam)
         return NULL;
     thread->callback = callback;
     thread->param = cbparam;
+#ifdef SDLMAME_SDL2
     thread->thread = SDL_CreateThread(worker_thread_entry, "Thread", thread);
+#else
+    thread->thread = SDL_CreateThread(worker_thread_entry, thread);
+#endif
     if ( thread->thread == NULL )
     {
         free(thread);
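The last hunk above handles the SDL 1.2/2.0 API split: SDL2's SDL_CreateThread() takes a thread-name argument that SDL 1.2 lacks, selected at compile time by the SDLMAME_SDL2 build flag. The dispatch in isolation (sketch, not part of the patch):

    #include <SDL.h>

    static int worker_entry(void *param)
    {
        /* ... thread body ... */
        return 0;
    }

    SDL_Thread *spawn_worker(void *param)
    {
    #ifdef SDLMAME_SDL2
        return SDL_CreateThread(worker_entry, "Worker", param);    // SDL 2.x: (fn, name, data)
    #else
        return SDL_CreateThread(worker_entry, param);              // SDL 1.2: (fn, data)
    #endif
    }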
diff --git a/src/osd/modules/sync/sync_tc.cpp b/src/osd/modules/sync/sync_tc.cpp
index 64a6a760e96..06dc699edce 100644
--- a/src/osd/modules/sync/sync_tc.cpp
+++ b/src/osd/modules/sync/sync_tc.cpp
@@ -58,6 +58,123 @@ struct osd_thread {
     pthread_t           thread;
 };
 
+struct osd_scalable_lock
+{
+    osd_lock *          lock;
+};
+
+//============================================================
+//  Scalable Locks
+//============================================================
+
+osd_scalable_lock *osd_scalable_lock_alloc(void)
+{
+    osd_scalable_lock *lock;
+
+    lock = (osd_scalable_lock *)calloc(1, sizeof(*lock));
+    if (lock == NULL)
+        return NULL;
+
+    lock->lock = osd_lock_alloc();
+    return lock;
+}
+
+
+INT32 osd_scalable_lock_acquire(osd_scalable_lock *lock)
+{
+    osd_lock_acquire(lock->lock);
+    return 0;
+}
+
+
+void osd_scalable_lock_release(osd_scalable_lock *lock, INT32 myslot)
+{
+    osd_lock_release(lock->lock);
+}
+
+void osd_scalable_lock_free(osd_scalable_lock *lock)
+{
+    osd_lock_free(lock->lock);
+    free(lock);
+}
+
+
+//============================================================
+//  osd_lock_alloc
+//============================================================
+
+osd_lock *osd_lock_alloc(void)
+{
+    hidden_mutex_t *mutex;
+    pthread_mutexattr_t mtxattr;
+
+    mutex = (hidden_mutex_t *)calloc(1, sizeof(hidden_mutex_t));
+    if (mutex == NULL)
+        return NULL;
+
+    pthread_mutexattr_init(&mtxattr);
+    pthread_mutexattr_settype(&mtxattr, PTHREAD_MUTEX_RECURSIVE);
+    pthread_mutex_init(&mutex->id, &mtxattr);
+
+    return (osd_lock *)mutex;
+}
+
+//============================================================
+//  osd_lock_acquire
+//============================================================
+
+void osd_lock_acquire(osd_lock *lock)
+{
+    hidden_mutex_t *mutex = (hidden_mutex_t *) lock;
+    int r;
+
+    r = pthread_mutex_lock(&mutex->id);
+    if (r==0)
+        return;
+    //osd_printf_error("Error on lock: %d: %s\n", r, strerror(r));
+}
+
+//============================================================
+//  osd_lock_try
+//============================================================
+
+int osd_lock_try(osd_lock *lock)
+{
+    hidden_mutex_t *mutex = (hidden_mutex_t *) lock;
+    int r;
+
+    r = pthread_mutex_trylock(&mutex->id);
+    if (r==0)
+        return 1;
+    //if (r!=EBUSY)
+    //  osd_printf_error("Error on trylock: %d: %s\n", r, strerror(r));
+    return 0;
+}
+
+//============================================================
+//  osd_lock_release
+//============================================================
+
+void osd_lock_release(osd_lock *lock)
+{
+    hidden_mutex_t *mutex = (hidden_mutex_t *) lock;
+
+    pthread_mutex_unlock(&mutex->id);
+}
+
+//============================================================
+//  osd_lock_free
+//============================================================
+
+void osd_lock_free(osd_lock *lock)
+{
+    hidden_mutex_t *mutex = (hidden_mutex_t *) lock;
+
+    //pthread_mutex_unlock(&mutex->id);
+    pthread_mutex_destroy(&mutex->id);
+    free(mutex);
+}
+
 //============================================================
 //  osd_event_alloc
 //============================================================
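sync_tc.cpp takes the simplest route: osd_lock is a PTHREAD_MUTEX_RECURSIVE mutex, so nested acquires by the owning thread are counted by the pthread library itself, and osd_scalable_lock is just a wrapper that always returns slot 0. A self-contained sketch of that recursive-mutex setup, not part of the patch:

    #include <pthread.h>
    #include <stdio.h>

    int main(void)
    {
        pthread_mutex_t     mtx;
        pthread_mutexattr_t attr;

        pthread_mutexattr_init(&attr);
        pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
        pthread_mutex_init(&mtx, &attr);
        pthread_mutexattr_destroy(&attr);

        // nested locking is legal on a recursive mutex
        pthread_mutex_lock(&mtx);
        pthread_mutex_lock(&mtx);
        pthread_mutex_unlock(&mtx);
        pthread_mutex_unlock(&mtx);

        pthread_mutex_destroy(&mtx);
        printf("recursive lock/unlock ok\n");
        return 0;
    }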
diff --git a/src/osd/modules/sync/sync_windows.cpp b/src/osd/modules/sync/sync_windows.cpp
index c744f345e6f..9439d68b72c 100644
--- a/src/osd/modules/sync/sync_windows.cpp
+++ b/src/osd/modules/sync/sync_windows.cpp
@@ -23,6 +23,8 @@
 //============================================================
 
 #define DEBUG_SLOW_LOCKS    0
+#define USE_SCALABLE_LOCKS  (0)
+
 
 //============================================================
@@ -31,6 +33,11 @@
 
 typedef BOOL (WINAPI *try_enter_critical_section_ptr)(LPCRITICAL_SECTION lpCriticalSection);
 
+struct osd_lock
+{
+    CRITICAL_SECTION    critsect;
+};
+
 struct osd_event
 {
     void *              ptr;
@@ -42,6 +49,113 @@ struct osd_thread {
     void *              param;
 };
 
+struct osd_scalable_lock
+{
+#if USE_SCALABLE_LOCKS
+    struct
+    {
+        volatile INT32  haslock;        // do we have the lock?
+        INT32           filler[64/4-1]; // assumes a 64-byte cache line
+    } slot[WORK_MAX_THREADS];           // one slot per thread
+    volatile INT32      nextindex;      // index of next slot to use
+#else
+    CRITICAL_SECTION    section;
+#endif
+};
+
+
+//============================================================
+//  GLOBAL VARIABLES
+//============================================================
+
+static try_enter_critical_section_ptr try_enter_critical_section = nullptr;
+static int checked_for_try_enter = FALSE;
+
+
+
+//============================================================
+//  osd_lock_alloc
+//============================================================
+
+osd_lock *osd_lock_alloc(void)
+{
+    osd_lock *lock = (osd_lock *)malloc(sizeof(*lock));
+    if (lock == nullptr)
+        return nullptr;
+    InitializeCriticalSection(&lock->critsect);
+    return lock;
+}
+
+
+//============================================================
+//  osd_lock_acquire
+//============================================================
+
+void osd_lock_acquire(osd_lock *lock)
+{
+#if DEBUG_SLOW_LOCKS
+    osd_ticks_t ticks = osd_ticks();
+#endif
+
+    // block until we can acquire the lock
+    EnterCriticalSection(&lock->critsect);
+
+#if DEBUG_SLOW_LOCKS
+    // log any locks that take more than 1ms
+    ticks = osd_ticks() - ticks;
+    if (ticks > osd_ticks_per_second() / 1000) osd_printf_debug("Blocked %d ticks on lock acquire\n", (int)ticks);
+#endif
+}
+
+
+//============================================================
+//  osd_lock_try
+//============================================================
+
+int osd_lock_try(osd_lock *lock)
+{
+    int result = TRUE;
+
+    // if we haven't yet checked for the TryEnter API, do it now
+    if (!checked_for_try_enter)
+    {
+        // see if we can use TryEnterCriticalSection
+        HMODULE library = LoadLibrary(TEXT("kernel32.dll"));
+        if (library != nullptr)
+            try_enter_critical_section = (try_enter_critical_section_ptr)GetProcAddress(library, "TryEnterCriticalSection");
+        checked_for_try_enter = TRUE;
+    }
+
+    // if we have it, use it, otherwise just block
+    if (try_enter_critical_section != nullptr)
+        result = (*try_enter_critical_section)(&lock->critsect);
+    else
+        EnterCriticalSection(&lock->critsect);
+    return result;
+}
+
+
+//============================================================
+//  osd_lock_release
+//============================================================
+
+void osd_lock_release(osd_lock *lock)
+{
+    LeaveCriticalSection(&lock->critsect);
+}
+
+
+//============================================================
+//  osd_lock_free
+//============================================================
+
+void osd_lock_free(osd_lock *lock)
+{
+    DeleteCriticalSection(&lock->critsect);
+    free(lock);
+}
+
+
 //============================================================
 //  win_compare_exchange32
 //============================================================
@@ -203,3 +317,66 @@ int osd_thread_cpu_affinity(osd_thread *thread, UINT32 mask)
 {
     return TRUE;
 }
+
+//============================================================
+//  Scalable Locks
+//============================================================
+
+osd_scalable_lock *osd_scalable_lock_alloc(void)
+{
+    osd_scalable_lock *lock;
+
+    lock = (osd_scalable_lock *)calloc(1, sizeof(*lock));
+    if (lock == nullptr)
+        return nullptr;
+
+    memset(lock, 0, sizeof(*lock));
+#if USE_SCALABLE_LOCKS
+    lock->slot[0].haslock = TRUE;
+#else
+    InitializeCriticalSection(&lock->section);
+#endif
+    return lock;
+}
+
+
+INT32 osd_scalable_lock_acquire(osd_scalable_lock *lock)
+{
+#if USE_SCALABLE_LOCKS
+    INT32 myslot = (atomic_increment32(&lock->nextindex) - 1) & (WORK_MAX_THREADS - 1);
+    INT32 backoff = 1;
+
+    while (!lock->slot[myslot].haslock)
+    {
+        INT32 backcount;
+        for (backcount = 0; backcount < backoff; backcount++)
+            osd_yield_processor();
+        backoff <<= 1;
+    }
+    lock->slot[myslot].haslock = FALSE;
+    return myslot;
+#else
+    EnterCriticalSection(&lock->section);
+    return 0;
+#endif
+}
+
+
+void osd_scalable_lock_release(osd_scalable_lock *lock, INT32 myslot)
+{
+#if USE_SCALABLE_LOCKS
+    atomic_exchange32(&lock->slot[(myslot + 1) & (WORK_MAX_THREADS - 1)].haslock, TRUE);
+#else
+    LeaveCriticalSection(&lock->section);
+#endif
+}
+
+
+void osd_scalable_lock_free(osd_scalable_lock *lock)
+{
+#if USE_SCALABLE_LOCKS
+#else
+    DeleteCriticalSection(&lock->section);
+#endif
+    free(lock);
+}
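osd_lock_try() above resolves TryEnterCriticalSection() at runtime because the oldest Windows versions shipped kernel32.dll without it. The lookup-and-fallback pattern on its own (sketch, not part of the patch; it uses GetModuleHandle rather than the patch's LoadLibrary, since kernel32 is always already loaded):

    #include <windows.h>
    #include <stdio.h>

    typedef BOOL (WINAPI *try_enter_ptr)(LPCRITICAL_SECTION);

    int main(void)
    {
        CRITICAL_SECTION cs;
        InitializeCriticalSection(&cs);

        // resolve the optional API at runtime
        try_enter_ptr try_enter = (try_enter_ptr)GetProcAddress(
                GetModuleHandle(TEXT("kernel32.dll")), "TryEnterCriticalSection");

        BOOL held = TRUE;
        if (try_enter != NULL)
            held = (*try_enter)(&cs);       // non-blocking attempt
        else
            EnterCriticalSection(&cs);      // fall back to a blocking acquire

        printf("try-lock %s\n", held ? "succeeded" : "failed");
        if (held)
            LeaveCriticalSection(&cs);
        DeleteCriticalSection(&cs);
        return 0;
    }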
diff --git a/src/osd/modules/sync/work_osd.cpp b/src/osd/modules/sync/work_osd.cpp
index 3238fdfe792..e5582584666 100644
--- a/src/osd/modules/sync/work_osd.cpp
+++ b/src/osd/modules/sync/work_osd.cpp
@@ -18,7 +18,6 @@
 #include <stdint.h>
 #endif
 #endif
-#include <mutex>
 
 // MAME headers
 #include "osdcore.h"
@@ -110,7 +109,7 @@ struct work_thread_info
 
 struct osd_work_queue
 {
-    std::mutex          *lock;          // lock for protecting the queue
+    osd_scalable_lock * lock;           // lock for protecting the queue
     osd_work_item * volatile list;      // list of items in the queue
     osd_work_item ** volatile tailptr;  // pointer to the tail pointer of work items in the queue
     osd_work_item * volatile free;      // free list of work items
@@ -189,7 +188,7 @@ osd_work_queue *osd_work_queue_alloc(int flags)
         goto error;
 
     // initialize the critical section
-    queue->lock = new std::mutex();
+    queue->lock = osd_scalable_lock_alloc();
     if (queue->lock == NULL)
         goto error;
 
@@ -422,7 +421,7 @@ void osd_work_queue_free(osd_work_queue *queue)
     printf("Spin loops  = %9d\n", queue->spinloops);
 #endif
 
-    delete queue->lock;
+    osd_scalable_lock_free(queue->lock);
 
     // free the queue itself
     osd_free(queue);
 }
@@ -436,6 +435,7 @@ osd_work_item *osd_work_item_queue_multiple(osd_work_queue *queue, osd_work_call
 {
     osd_work_item *itemlist = NULL, *lastitem = NULL;
     osd_work_item **item_tailptr = &itemlist;
+    INT32 lockslot;
     int itemnum;
 
     // loop over items, building up a local list of work
@@ -444,14 +444,12 @@ osd_work_item *osd_work_item_queue_multiple(osd_work_queue *queue, osd_work_call
         osd_work_item *item;
 
         // first allocate a new work item; try the free list first
+        INT32 myslot = osd_scalable_lock_acquire(queue->lock);
+        do
         {
-            queue->lock->lock();
-            do
-            {
-                item = (osd_work_item *)queue->free;
-            } while (item != NULL && compare_exchange_ptr((PVOID volatile *)&queue->free, item, item->next) != item);
-            queue->lock->unlock();
-        }
+            item = (osd_work_item *)queue->free;
+        } while (item != NULL && compare_exchange_ptr((PVOID volatile *)&queue->free, item, item->next) != item);
+        osd_scalable_lock_release(queue->lock, myslot);
 
         // if nothing, allocate something new
         if (item == NULL)
@@ -484,12 +482,10 @@ osd_work_item *osd_work_item_queue_multiple(osd_work_queue *queue, osd_work_call
     }
 
     // enqueue the whole thing within the critical section
-    {
-        queue->lock->lock();
-        *queue->tailptr = itemlist;
-        queue->tailptr = item_tailptr;
-        queue->lock->unlock();
-    }
+    lockslot = osd_scalable_lock_acquire(queue->lock);
+    *queue->tailptr = itemlist;
+    queue->tailptr = item_tailptr;
+    osd_scalable_lock_release(queue->lock, lockslot);
 
     // increment the number of items in the queue
     atomic_add32(&queue->items, numitems);
@@ -543,9 +539,9 @@ int osd_work_item_wait(osd_work_item *item, osd_ticks_t timeout)
     // if we don't have an event, create one
     if (item->event == NULL)
     {
-        item->queue->lock->lock();
+        INT32 lockslot = osd_scalable_lock_acquire(item->queue->lock);
        item->event = osd_event_alloc(TRUE, FALSE);     // manual reset, not signalled
-        item->queue->lock->unlock();
+        osd_scalable_lock_release(item->queue->lock, lockslot);
     }
     else
         osd_event_reset(item->event);
@@ -588,13 +584,13 @@ void osd_work_item_release(osd_work_item *item)
     osd_work_item_wait(item, 100 * osd_ticks_per_second());
 
     // add us to the free list on our queue
-    item->queue->lock->lock();
+    INT32 lockslot = osd_scalable_lock_acquire(item->queue->lock);
     do
     {
        next = (osd_work_item *)item->queue->free;
        item->next = next;
     } while (compare_exchange_ptr((PVOID volatile *)&item->queue->free, next, item) != next);
-    item->queue->lock->unlock();
+    osd_scalable_lock_release(item->queue->lock, lockslot);
 }
 
 
@@ -715,7 +711,7 @@ static void worker_thread_process(osd_work_queue *queue, work_thread_info *threa
 
     // use a critical section to synchronize the removal of items
     {
-        queue->lock->lock();
+        INT32 lockslot = osd_scalable_lock_acquire(queue->lock);
        if (queue->list == NULL)
        {
            end_loop = true;
@@ -731,7 +727,7 @@ static void worker_thread_process(osd_work_queue *queue, work_thread_info *threa
                queue->tailptr = (osd_work_item **)&queue->list;
            }
        }
-        queue->lock->unlock();
+        osd_scalable_lock_release(queue->lock, lockslot);
     }
 
     if (end_loop)
@@ -757,13 +753,13 @@ static void worker_thread_process(osd_work_queue *queue, work_thread_info *threa
        // set the result and signal the event
        else
        {
-            queue->lock->lock();
+            INT32 lockslot = osd_scalable_lock_acquire(item->queue->lock);
            if (item->event != NULL)
            {
                osd_event_set(item->event);
                add_to_stat(&item->queue->setevents, 1);
            }
-            queue->lock->unlock();
+            osd_scalable_lock_release(item->queue->lock, lockslot);
        }
 
        // if we removed an item and there's still work to do, bump the stats
@@ -784,8 +780,8 @@ static void worker_thread_process(osd_work_queue *queue, work_thread_info *threa
 
 bool queue_has_list_items(osd_work_queue *queue)
 {
-    queue->lock->lock();
+    INT32 lockslot = osd_scalable_lock_acquire(queue->lock);
     bool has_list_items = (queue->list != NULL);
-    queue->lock->unlock();
+    osd_scalable_lock_release(queue->lock, lockslot);
     return has_list_items;
 }
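Throughout work_osd.cpp the free-list code keeps its compare-exchange loops even though they now run under the scalable lock; the loops survive from the fully lock-free version and remain safe (and cheap) when access is serialized. The push/pop pair reduced to a standalone sketch, not part of the patch (hypothetical work_item type; std::atomic replaces compare_exchange_ptr):

    #include <atomic>
    #include <cstdio>

    struct work_item { work_item *next = nullptr; };

    static std::atomic<work_item *> free_list{nullptr};

    // push an item onto the free list
    void free_list_push(work_item *item)
    {
        work_item *head = free_list.load();
        do { item->next = head; }
        while (!free_list.compare_exchange_weak(head, item));
    }

    // pop one item; compare_exchange_weak reloads 'item' on failure
    work_item *free_list_pop(void)
    {
        work_item *item = free_list.load();
        while (item != nullptr && !free_list.compare_exchange_weak(item, item->next))
            ;
        return item;
    }

    int main()
    {
        work_item a, b;
        free_list_push(&a);
        free_list_push(&b);
        std::printf("%s\n", free_list_pop() == &b ? "LIFO pop ok" : "unexpected");
        return 0;
    }

Note that a bare CAS pop like this is not ABA-safe under fully concurrent use (item->next may be read from a recycled node); in the patch the surrounding queue lock is what closes that window.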