Diffstat (limited to 'src/osd/modules/sync/sync_os2.cpp')
-rw-r--r--  src/osd/modules/sync/sync_os2.cpp | 280
1 file changed, 280 insertions(+), 0 deletions(-)
diff --git a/src/osd/modules/sync/sync_os2.cpp b/src/osd/modules/sync/sync_os2.cpp
index 6e3265bb198..dba34065610 100644
--- a/src/osd/modules/sync/sync_os2.cpp
+++ b/src/osd/modules/sync/sync_os2.cpp
@@ -31,6 +31,16 @@
#define pthread_t int
#define pthread_self _gettid
+struct osd_lock {
+ volatile pthread_t holder; // thread id of the current owner, 0 if unheld
+ INT32 count; // recursion depth for the owning thread
+#ifdef PTR64
+ INT8 padding[52]; // pad the struct to a 64-byte cache line
+#else
+ INT8 padding[56]; // pad the struct to a 64-byte cache line
+#endif
+};
+
struct osd_event {
HMTX hmtx;
HEV hev;
@@ -48,6 +58,276 @@ struct osd_thread {
void *param;
};
+struct osd_scalable_lock
+{
+ struct
+ {
+ volatile INT32 haslock; // do we have the lock?
+ INT32 filler[64/4-1]; // assumes a 64-byte cache line
+ } slot[WORK_MAX_THREADS]; // one slot per thread
+ volatile INT32 nextindex; // index of next slot to use
+};
+
+
+//============================================================
+// Scalable Locks
+//============================================================
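+
+// Each acquire claims the next slot from nextindex and spins only on that
+// slot's own 64-byte line, so waiting threads do not bounce a shared cache
+// line between CPUs. Release hands the lock to the following slot, giving
+// FIFO handoff. The index masking below assumes WORK_MAX_THREADS is a
+// power of two.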
+
+osd_scalable_lock *osd_scalable_lock_alloc(void)
+{
+ osd_scalable_lock *lock;
+
+ lock = (osd_scalable_lock *)calloc(1, sizeof(*lock));
+ if (lock == NULL)
+ return NULL;
+
+ lock->slot[0].haslock = TRUE;
+ return lock;
+}
+
+
+INT32 osd_scalable_lock_acquire(osd_scalable_lock *lock)
+{
+ INT32 myslot = (atomic_increment32(&lock->nextindex) - 1) & (WORK_MAX_THREADS - 1);
+
+#if defined(__i386__) || defined(__x86_64__)
+ register INT32 tmp;
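+ // Atomically swap 0 into our slot's flag: a nonzero previous value
+ // means the lock was handed to us. Otherwise spin with plain reads
+ // and PAUSE until the slot looks set, then retry the xchg.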
+ __asm__ __volatile__ (
+ "1: clr %[tmp] ;"
+ " xchg %[haslock], %[tmp] ;"
+ " test %[tmp], %[tmp] ;"
+ " jne 3f ;"
+ "2: mov %[haslock], %[tmp] ;"
+ " test %[tmp], %[tmp] ;"
+ " jne 1b ;"
+ " pause ;"
+ " jmp 2b ;"
+ "3: "
+ : [haslock] "+m" (lock->slot[myslot].haslock)
+ , [tmp] "=&r" (tmp)
+ :
+ : "%cc"
+ );
+#elif defined(__ppc__) || defined (__PPC__) || defined(__ppc64__) || defined(__PPC64__)
+ register INT32 tmp;
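+ // Same protocol via lwarx/stwcx.: reserve the flag word, fall back
+ // to a plain lwzx spin while it reads zero, then store zero through
+ // the reservation to claim the lock.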
+ __asm__ __volatile__ (
+ "1: lwarx %[tmp], 0, %[haslock] \n"
+ " cmpwi %[tmp], 0 \n"
+ " bne 3f \n"
+ "2: lwzx %[tmp], 0, %[haslock] \n"
+ " cmpwi %[tmp], 0 \n"
+ " bne 1b \n"
+ " nop \n"
+ " nop \n"
+ " b 2b \n"
+ "3: li %[tmp], 0 \n"
+ " sync \n"
+ " stwcx. %[tmp], 0, %[haslock] \n"
+ " bne- 1b \n"
+ " eieio \n"
+ : [tmp] "=&r" (tmp)
+ : [haslock] "r" (&lock->slot[myslot].haslock)
+ : "cr0"
+ );
+#else
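+ // Portable fallback: compare-and-swap TRUE -> FALSE on our slot,
+ // with exponential backoff between attempts.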
+ INT32 backoff = 1;
+ while (!osd_compare_exchange32(&lock->slot[myslot].haslock, TRUE, FALSE))
+ {
+ INT32 backcount;
+ for (backcount = 0; backcount < backoff; backcount++)
+ osd_yield_processor();
+ backoff <<= 1;
+ }
+#endif
+ return myslot;
+}
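+
+// A minimal usage sketch (hypothetical caller): the slot index returned by
+// osd_scalable_lock_acquire must be handed back to osd_scalable_lock_release
+// so the lock can be passed on to the next waiter:
+//
+//     INT32 slot = osd_scalable_lock_acquire(lock);
+//     /* ... critical section ... */
+//     osd_scalable_lock_release(lock, slot);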
+
+
+void osd_scalable_lock_release(osd_scalable_lock *lock, INT32 myslot)
+{
+#if defined(__i386__) || defined(__x86_64__)
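+ // Hand the lock to the next slot by storing TRUE there; xchg is used
+ // for its implicit full memory barrier on x86.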
+ register INT32 tmp = TRUE;
+ __asm__ __volatile__ (
+ " xchg %[haslock], %[tmp] ;"
+ : [haslock] "+m" (lock->slot[(myslot + 1) & (WORK_MAX_THREADS - 1)].haslock)
+ , [tmp] "+r" (tmp)
+ :
+ );
+#elif defined(__ppc__) || defined (__PPC__) || defined(__ppc64__) || defined(__PPC64__)
+ lock->slot[(myslot + 1) & (WORK_MAX_THREADS - 1)].haslock = TRUE;
+ __asm__ __volatile__ ( " eieio " : : );
+#else
+ osd_exchange32(&lock->slot[(myslot + 1) & (WORK_MAX_THREADS - 1)].haslock, TRUE);
+#endif
+}
+
+void osd_scalable_lock_free(osd_scalable_lock *lock)
+{
+ free(lock);
+}
+
+static inline pthread_t osd_compare_exchange_pthread_t(pthread_t volatile *ptr, pthread_t compare, pthread_t exchange)
+{
+#ifdef PTR64
+ INT64 result = compare_exchange64((INT64 volatile *)ptr, (INT64)compare, (INT64)exchange);
+#else
+ INT32 result = compare_exchange32((INT32 volatile *)ptr, (INT32)compare, (INT32)exchange);
+#endif
+ return (pthread_t)result;
+}
+
+static inline pthread_t osd_exchange_pthread_t(pthread_t volatile *ptr, pthread_t exchange)
+{
+#ifdef PTR64
+ INT64 result = osd_exchange64((INT64 volatile *)ptr, (INT64)exchange);
+#else
+ INT32 result = atomic_exchange32((INT32 volatile *)ptr, (INT32)exchange);
+#endif
+ return (pthread_t)result;
+}
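+
+// The two wrappers above map atomic operations on pthread_t (an int on
+// OS/2; see the #define near the top of this file) onto the 32-bit or
+// 64-bit primitives selected by PTR64.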
+
+
+//============================================================
+// osd_lock_alloc
+//============================================================
+
+osd_lock *osd_lock_alloc(void)
+{
+ osd_lock *lock;
+
+ lock = (osd_lock *)calloc(1, sizeof(osd_lock));
+ if (lock == NULL)
+ return NULL;
+
+ lock->holder = 0;
+ lock->count = 0;
+
+ return lock;
+}
+
+//============================================================
+// osd_lock_acquire
+//============================================================
+
+void osd_lock_acquire(osd_lock *lock)
+{
+ pthread_t current, prev;
+
+ current = pthread_self();
+ prev = osd_compare_exchange_pthread_t(&lock->holder, 0, current);
+ if (prev != 0 && prev != current)
+ {
+ do {
+ register INT32 spin = 10000; // Convenient spin count
+ register pthread_t tmp;
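+ // Bounded spin: watch the holder until it reads zero (lock free) or
+ // the spin budget runs out, then retry the compare-exchange.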
+#if defined(__i386__) || defined(__x86_64__)
+ __asm__ __volatile__ (
+ "1: pause ;"
+ " mov %[holder], %[tmp] ;"
+ " test %[tmp], %[tmp] ;"
+ " loopne 1b ;"
+ : [spin] "+c" (spin)
+ , [tmp] "=&r" (tmp)
+ : [holder] "m" (lock->holder)
+ : "%cc"
+ );
+#elif defined(__ppc__) || defined(__PPC__)
+ __asm__ __volatile__ (
+ "1: nop \n"
+ " nop \n"
+ " lwzx %[tmp], 0, %[holder] \n"
+ " cmpwi %[tmp], 0 \n"
+ " bdnzt eq, 1b \n"
+ : [spin] "+c" (spin)
+ , [tmp] "=&r" (tmp)
+ : [holder] "r" (&lock->holder)
+ : "cr0"
+ );
+#elif defined(__ppc64__) || defined(__PPC64__)
+ __asm__ __volatile__ (
+ "1: nop \n"
+ " nop \n"
+ " ldx %[tmp], 0, %[holder] \n"
+ " cmpdi %[tmp], 0 \n"
+ " bdnzt eq, 1b \n"
+ : [spin] "+c" (spin)
+ , [tmp] "=&r" (tmp)
+ : [holder] "r" (&lock->holder)
+ : "cr0"
+ );
+#else
+ while (--spin > 0 && lock->holder != 0)
+ osd_yield_processor();
+#endif
+#if 0
+ /* If you mean to use locks as a blocking mechanism for extended
+ * periods of time, you should do something like this. However,
+ * it kills the performance of gaelco3d.
+ */
+ if (spin == 0)
+ {
+ struct timespec sleep = { 0, 100000 }, remaining;
+ nanosleep(&sleep, &remaining); // sleep for 100us
+ }
+#endif
+ } while (osd_compare_exchange_pthread_t(&lock->holder, 0, current) != 0);
+ }
+ lock->count++;
+}
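+
+// The count field makes the lock recursive. A sketch of what that allows
+// (hypothetical caller):
+//
+//     osd_lock_acquire(lock);     // count -> 1
+//     osd_lock_acquire(lock);     // same thread re-enters, count -> 2
+//     osd_lock_release(lock);     // count -> 1, lock still held
+//     osd_lock_release(lock);     // count -> 0, holder cleared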
+
+//============================================================
+// osd_lock_try
+//============================================================
+
+int osd_lock_try(osd_lock *lock)
+{
+ pthread_t current, prev;
+
+ current = pthread_self();
+ prev = osd_compare_exchange_pthread_t(&lock->holder, 0, current);
+ if (prev == 0 || prev == current)
+ {
+ lock->count++;
+ return 1;
+ }
+ return 0;
+}
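+
+// Typical non-blocking usage (hypothetical caller):
+//
+//     if (osd_lock_try(lock))
+//     {
+//         /* ... critical section ... */
+//         osd_lock_release(lock);
+//     }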
+
+//============================================================
+// osd_lock_release
+//============================================================
+
+void osd_lock_release(osd_lock *lock)
+{
+ pthread_t current;
+
+ current = pthread_self();
+ if (lock->holder == current)
+ {
+ if (--lock->count == 0)
+#if defined(__ppc__) || defined(__PPC__) || defined(__ppc64__) || defined(__PPC64__)
+ lock->holder = 0;
+ __asm__ __volatile__( " eieio " : : );
+#else
+ osd_exchange_pthread_t(&lock->holder, 0);
+#endif
+ return;
+ }
+
+ // trying to release a lock you don't hold is bad!
+// assert(lock->holder == pthread_self());
+}
+
+//============================================================
+// osd_lock_free
+//============================================================
+
+void osd_lock_free(osd_lock *lock)
+{
+ free(lock);
+}
+
//============================================================
// osd_event_alloc
//============================================================