Diffstat (limited to 'src/osd/modules/sync/sync_windows.cpp')
-rw-r--r--  src/osd/modules/sync/sync_windows.cpp  177
1 file changed, 177 insertions(+), 0 deletions(-)
diff --git a/src/osd/modules/sync/sync_windows.cpp b/src/osd/modules/sync/sync_windows.cpp
index c744f345e6f..9439d68b72c 100644
--- a/src/osd/modules/sync/sync_windows.cpp
+++ b/src/osd/modules/sync/sync_windows.cpp
@@ -23,6 +23,8 @@
//============================================================
#define DEBUG_SLOW_LOCKS 0
+#define USE_SCALABLE_LOCKS (0)
+
//============================================================
@@ -31,6 +33,11 @@
typedef BOOL (WINAPI *try_enter_critical_section_ptr)(LPCRITICAL_SECTION lpCriticalSection);
+struct osd_lock
+{
+ CRITICAL_SECTION critsect;
+};
+
struct osd_event
{
void * ptr;
@@ -42,6 +49,113 @@ struct osd_thread {
void *param;
};
+struct osd_scalable_lock
+{
+#if USE_SCALABLE_LOCKS
+ struct
+ {
+ volatile INT32 haslock; // do we have the lock?
+ INT32 filler[64/4-1]; // assumes a 64-byte cache line
+ } slot[WORK_MAX_THREADS]; // one slot per thread
+ volatile INT32 nextindex; // index of next slot to use
+#else
+ CRITICAL_SECTION section;
+#endif
+};
+
+
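A note on the slot layout above: each slot pairs one 4-byte haslock flag with fifteen filler INT32s, so the whole slot spans a full 64-byte cache line. Each spinning thread therefore touches only its own line, avoiding false sharing between contenders. A minimal standalone sketch of the same layout check, assuming a 64-byte line, C++11, and the INT32 typedef from this file (not part of the patch):

    // Hypothetical illustration of the padding used by osd_scalable_lock:
    // one INT32 flag plus 64/4-1 filler INT32s = exactly 64 bytes per slot.
    struct padded_slot
    {
        volatile INT32 haslock;      // 4 bytes: the per-thread grant flag
        INT32 filler[64/4 - 1];      // 60 bytes of padding to fill the line
    };
    static_assert(sizeof(padded_slot) == 64, "slot should span one cache line");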
+//============================================================
+// GLOBAL VARIABLES
+//============================================================
+
+static try_enter_critical_section_ptr try_enter_critical_section = nullptr;
+static int checked_for_try_enter = FALSE;
+
+
+
+//============================================================
+// osd_lock_alloc
+//============================================================
+
+osd_lock *osd_lock_alloc(void)
+{
+ osd_lock *lock = (osd_lock *)malloc(sizeof(*lock));
+ if (lock == nullptr)
+ return nullptr;
+ InitializeCriticalSection(&lock->critsect);
+ return lock;
+}
+
+
+//============================================================
+// osd_lock_acquire
+//============================================================
+
+void osd_lock_acquire(osd_lock *lock)
+{
+#if DEBUG_SLOW_LOCKS
+ osd_ticks_t ticks = osd_ticks();
+#endif
+
+ // block until we can acquire the lock
+ EnterCriticalSection(&lock->critsect);
+
+#if DEBUG_SLOW_LOCKS
+ // log any locks that take more than 1ms
+ ticks = osd_ticks() - ticks;
+ if (ticks > osd_ticks_per_second() / 1000) osd_printf_debug("Blocked %d ticks on lock acquire\n", (int)ticks);
+#endif
+}
+
+
+//============================================================
+// osd_lock_try
+//============================================================
+
+int osd_lock_try(osd_lock *lock)
+{
+ int result = TRUE;
+
+ // if we haven't yet checked for the TryEnter API, do it now
+ if (!checked_for_try_enter)
+ {
+ // see if we can use TryEnterCriticalSection
+ HMODULE library = LoadLibrary(TEXT("kernel32.dll"));
+ if (library != nullptr)
+ try_enter_critical_section = (try_enter_critical_section_ptr)GetProcAddress(library, "TryEnterCriticalSection");
+ checked_for_try_enter = TRUE;
+ }
+
+ // if we have it, use it, otherwise just block
+ if (try_enter_critical_section != nullptr)
+ result = (*try_enter_critical_section)(&lock->critsect);
+ else
+ EnterCriticalSection(&lock->critsect);
+ return result;
+}
+
+
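The indirection through GetProcAddress exists because TryEnterCriticalSection is exported by kernel32.dll only on the Windows NT line (NT 4.0 and later). On Windows 9x the lookup fails, try_enter_critical_section stays null, and osd_lock_try silently degrades to a blocking EnterCriticalSection that always reports success.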
+//============================================================
+// osd_lock_release
+//============================================================
+
+void osd_lock_release(osd_lock *lock)
+{
+ LeaveCriticalSection(&lock->critsect);
+}
+
+
+//============================================================
+// osd_lock_free
+//============================================================
+
+void osd_lock_free(osd_lock *lock)
+{
+ DeleteCriticalSection(&lock->critsect);
+ free(lock);
+}
+
+
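For reference, a short sketch of how a caller would exercise this lock API end to end (illustrative only, not taken from the patch):

    osd_lock *lock = osd_lock_alloc();

    // non-blocking attempt first; fall back to a blocking acquire
    if (!osd_lock_try(lock))
        osd_lock_acquire(lock);

    // ... critical section ...

    osd_lock_release(lock);
    osd_lock_free(lock);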
//============================================================
// win_compare_exchange32
//============================================================
@@ -203,3 +317,66 @@ int osd_thread_cpu_affinity(osd_thread *thread, UINT32 mask)
{
return TRUE;
}
+
+//============================================================
+// Scalable Locks
+//============================================================
+
+osd_scalable_lock *osd_scalable_lock_alloc(void)
+{
+ osd_scalable_lock *lock;
+
+ lock = (osd_scalable_lock *)calloc(1, sizeof(*lock));
+ if (lock == nullptr)
+ return nullptr;
+
+#if USE_SCALABLE_LOCKS
+ lock->slot[0].haslock = TRUE;
+#else
+ InitializeCriticalSection(&lock->section);
+#endif
+ return lock;
+}
+
+
+INT32 osd_scalable_lock_acquire(osd_scalable_lock *lock)
+{
+#if USE_SCALABLE_LOCKS
+ INT32 myslot = (atomic_increment32(&lock->nextindex) - 1) & (WORK_MAX_THREADS - 1);
+ INT32 backoff = 1;
+
+ while (!lock->slot[myslot].haslock)
+ {
+ INT32 backcount;
+ for (backcount = 0; backcount < backoff; backcount++)
+ osd_yield_processor();
+ backoff <<= 1;
+ }
+ lock->slot[myslot].haslock = FALSE;
+ return myslot;
+#else
+ EnterCriticalSection(&lock->section);
+ return 0;
+#endif
+}
+
+
+void osd_scalable_lock_release(osd_scalable_lock *lock, INT32 myslot)
+{
+#if USE_SCALABLE_LOCKS
+ atomic_exchange32(&lock->slot[(myslot + 1) & (WORK_MAX_THREADS - 1)].haslock, TRUE);
+#else
+ LeaveCriticalSection(&lock->section);
+#endif
+}
+
+
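Taken together, the two functions above implement an array-based queue lock (an Anderson-style lock): atomic_increment32 deals each contender its own slot, every thread spins only on its own padded flag with exponential backoff, and the releaser hands ownership to the next slot in the ring. A condensed, hypothetical model of that hand-off using raw Win32 interlocked calls (padding and backoff omitted; all names here are invented for illustration):

    #include <windows.h>

    #define NSLOTS 16   // power of two, standing in for WORK_MAX_THREADS

    static LONG volatile next_index = 0;
    static LONG volatile has_lock[NSLOTS] = { 1 };  // slot 0 owns the lock at start

    static LONG model_acquire(void)
    {
        // claim the next ticket in the ring
        LONG myslot = (InterlockedIncrement(&next_index) - 1) & (NSLOTS - 1);
        while (!has_lock[myslot])
            YieldProcessor();            // spin only on our own slot
        has_lock[myslot] = 0;            // consume the grant
        return myslot;
    }

    static void model_release(LONG myslot)
    {
        // pass ownership to the next slot in the ring
        InterlockedExchange(&has_lock[(myslot + 1) & (NSLOTS - 1)], 1);
    }

Like the original, this model assumes no more than NSLOTS threads ever wait at once; with more contenders the slot indices would wrap onto waiters still spinning.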
+void osd_scalable_lock_free(osd_scalable_lock *lock)
+{
+#if USE_SCALABLE_LOCKS
+#else
+ DeleteCriticalSection(&lock->section);
+#endif
+ free(lock);
+}
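Finally, a sketch of the expected calling pattern: the token returned by osd_scalable_lock_acquire identifies the caller's slot and must be handed back to osd_scalable_lock_release (illustrative only):

    osd_scalable_lock *lock = osd_scalable_lock_alloc();

    INT32 slot = osd_scalable_lock_acquire(lock);
    // ... short critical section ...
    osd_scalable_lock_release(lock, slot);

    osd_scalable_lock_free(lock);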