Found 171714 results in 23333 files, showing top 50 files.
github.com/html5rocks/www.html5rocks.com:static/demos/lemdoodle/examples/lem-planes/combined.js: [ master, ]
5051:   'needle': ['intro-finale/items-needle-thread', 'intro-finale/items-needle'],
5086:   'thread': ['intro-finale/items-thread', 'intro-finale/items-thread'],
5084:   'thread-thimble': ['intro-finale/items-thread-thimble',
5085:                      'intro-finale/items-thread-thimble'],
5102:     'needle', 'halo', 'noodles', 'neutron', 'nose'
5105:     'thread', 'pinstripe', 'neutron', 'noodles', 'clove'
5125:     'thread', 'thimble', 'earmuffs', 'neutron', 'nose'
5131:     'thread', 'thimble', 'noodles', 'neutron', 'nose', 'gnocchi', 'rivet',
13797:     'intro-finale/items-needle-thread',
13798:     'intro-finale/items-needle',
13839:     'intro-finale/items-thread-thimble',
13840:     'intro-finale/items-thread',
15100:   'intro-finale/items-needle-thread': { width: 68, height: 65, x: 834, y: 0 },
15101:   'intro-finale/items-needle': { top: 8, width: 59, height: 51, x: 904, y: 0 },
15142:   'intro-finale/items-thread-thimble': { left: 2, top: 11, bottom: 14, right: 11, width: 52, height: 40, x: 690, y: 96 },
15143:   'intro-finale/items-thread': { left: 4, top: 16, bottom: 15, right: 13, width: 48, height: 34, x: 744, y: 96 },
github.com/html5rocks/www.html5rocks.com:static/demos/lemdoodle/examples/lem-embedded/combined.js: [ master, ]
5024:   'needle': ['intro-finale/items-needle-thread', 'intro-finale/items-needle'],
5059:   'thread': ['intro-finale/items-thread', 'intro-finale/items-thread'],
5057:   'thread-thimble': ['intro-finale/items-thread-thimble',
5058:                      'intro-finale/items-thread-thimble'],
5075:     'needle', 'halo', 'noodles', 'neutron', 'nose'
5078:     'thread', 'pinstripe', 'neutron', 'noodles', 'clove'
5098:     'thread', 'thimble', 'earmuffs', 'neutron', 'nose'
5104:     'thread', 'thimble', 'noodles', 'neutron', 'nose', 'gnocchi', 'rivet',
13827:               // First run: Thread + thimble
13828:               nItems = ['thread', 'thimble'];
13831:               // Second run: Thread/thimble + random item that’s neither
13832:               nItems[0] = 'thread-thimble';
13836:               } while ((nItems[1] == 'thread-thimble') ||
13837:                        (nItems[1] == 'thread') || (nItems[1] == 'thimble'));
13843:             nItems = ['needle', 'noodles'];
13849:               nItems = ['thread', 'thimble'];
13852:               nItems = ['thread-thimble', 'forget-me-not'];
13859:               nItems = ['thread', 'thimble'];
13862:               nItems = ['thread-thimble', 'scissors'];
13868:             nItems = ['thread', 'pinstripe'];
13884:               nItems = ['thread', 'thimble'];
13887:               nItems = ['thread-thimble', 'neutron'];
13894:               nItems = ['thread', 'thimble'];
13897:               nItems = ['thread-thimble', 'basket'];
15362:     'intro-finale/items-needle-thread',
15363:     'intro-finale/items-needle',
15404:     'intro-finale/items-thread-thimble',
15405:     'intro-finale/items-thread',
16665:   'intro-finale/items-needle-thread': { width: 68, height: 65, x: 834, y: 0 },
16666:   'intro-finale/items-needle': { top: 8, width: 59, height: 51, x: 904, y: 0 },
16707:   'intro-finale/items-thread-thimble': { left: 2, top: 11, bottom: 14, right: 11, width: 52, height: 40, x: 690, y: 96 },
16708:   'intro-finale/items-thread': { left: 4, top: 16, bottom: 15, right: 13, width: 48, height: 34, x: 744, y: 96 },
chromium.googlesource.com/ios-chromium-mirror:native_client_sdk/src/libraries/third_party/pthreads-win32/implement.h: [ master, ]
185:   DWORD thread;			/* Win32 thread ID */
502:   ptw32_thread_t * thread;
158:   HANDLE threadH;		/* Win32 thread handle - POSIX thread is invalid if threadH == 0 */
162:   ptw32_mcs_lock_t threadLock;	/* Used for serialised access to public thread state */
246:   pthread_t ownerThread;
346:   void *threads;
350: typedef struct ThreadParms ThreadParms;
352: struct ThreadParms
401: typedef struct ThreadKeyAssoc ThreadKeyAssoc;
403: struct ThreadKeyAssoc
505:   ThreadKeyAssoc *nextThread;
507:   ThreadKeyAssoc *prevThread;
152: typedef struct ptw32_thread_t_       ptw32_thread_t;
155: struct ptw32_thread_t_
575: #define PTW32_THREAD_REUSE_EMPTY ((ptw32_thread_t *)(size_t) 1)
198: struct pthread_attr_t_
234: struct pthread_mutex_t_
273: struct pthread_mutexattr_t_
302: struct pthread_spinlock_t_
326: struct pthread_barrier_t_
336: struct pthread_barrierattr_t_
341: struct pthread_key_t_
360: struct pthread_cond_t_
378: struct pthread_condattr_t_
385: struct pthread_rwlock_t_
396: struct pthread_rwlockattr_t_
133:   PThreadStateInitial = 0,	/* Thread not running                   */
134:   PThreadStateRunning,		/* Thread alive & kicking               */
135:   PThreadStateSuspended,	/* Thread alive but suspended           */
136:   PThreadStateCancelPending,	/* Thread alive but                     */
138:   PThreadStateCanceling,	/* Thread alive but is                  */
141:   PThreadStateExiting,		/* Thread alive but exiting             */
143:   PThreadStateLast,             /* All handlers have been run and now   */
145:   PThreadStateReuse             /* In reuse pool.                       */
147: PThreadState;
944: #define _beginthreadex(security, \
957: #define _endthreadex ExitThread
129:    * This enumeration represents the state of the thread;
130:    * The thread is still "alive" if the numeric value of the
157:   unsigned __int64 seqNumber;	/* Process-unique thread sequence number */
159:   pthread_t ptHandle;		/* This thread's permanent pthread_t handle */
160:   ptw32_thread_t * prevReuse;	/* Links threads on reuse stack */
242:   int recursive_count;		/* Number of unlocks a thread needs to perform
263:  * Node used to manage per-thread lists of currently-held robust mutexes.
407:    *      This structure creates an association between a thread and a key.
409:    *      destroy routine for thread specific data registered by a user upon
410:    *      exiting a thread.
416:    *         T - Thread that has called pthread_setspecific(Kn)
417:    *            (head of chain is thread->keys)
436:    *      general lock (guarding the row) and the thread's general
440:    *      until both the key is deleted and the thread has called the
442:    *      to be freed as soon as either thread or key is concluded.
445:    *      key and thread locks are acquired consistently in the order
446:    *      "key lock then thread lock". An exception to this exists
447:    *      when a thread calls the destructors, however, this is done
450:    *      An association is created when a thread first calls
455:    *      thread calls the key destructor function on thread exit, or
459:    *      thread
460:    *              reference to the thread that owns the
462:    *              thread struct itself. Since the association is
463:    *              destroyed before the thread exits, this can never
464:    *              point to a different logical thread to the one that
465:    *              created the assoc, i.e. after thread struct reuse.
492:    *      1)      As soon as either the key or the thread is no longer
574: /* Thread Reuse stack bottom marker. Must not be NULL or any valid pointer to memory. */
578: extern ptw32_thread_t * ptw32_threadReuseTop;
579: extern ptw32_thread_t * ptw32_threadReuseBottom;
593: extern ptw32_mcs_lock_t ptw32_thread_reuse_lock;
626:   void ptw32_robust_mutex_remove(pthread_mutex_t* mutex, ptw32_thread_t* otp);
644:   void ptw32_threadReusePush (pthread_t thread);
648:   int ptw32_setthreadpriority (pthread_t thread, int policy, int priority);
659:   void ptw32_callUserDestroyRoutines (pthread_t thread);
661:   int ptw32_tkAssocCreate (ptw32_thread_t * thread, pthread_key_t key);
6:  * Keeps all the internals out of pthread.h
10:  *      Pthreads-win32 - POSIX Threads Library for Win32
224:   pthread_mutex_t lock;
248: 				   threads. */
267:   pthread_mutex_t mx;
308:     pthread_mutex_t mutex;	/* mutex if single cpu.            */
354:   pthread_t tid;
362:   long nWaitersBlocked;		/* Number of threads blocked            */
363:   long nWaitersGone;		/* Number of threads timed out          */
364:   long nWaitersToUnblock;	/* Number of threads to unblock         */
365:   sem_t semBlockQueue;		/* Queue up threads waiting for the     */
370:   pthread_mutex_t mtxUnblockLock;	/* Mutex that guards access to          */
373:   pthread_cond_t next;		/* Doubly linked list                   */
374:   pthread_cond_t prev;
387:   pthread_mutex_t mtxExclusiveAccess;
388:   pthread_mutex_t mtxSharedAccessCompleted;
389:   pthread_cond_t cndSharedAccessCompleted;
415:    *            (head of chain is key->threads)
451:    *      pthread_setspecific() on a key that has a specified
471:    *              The pthread_t->keys attribute is the head of a
474:    *              between a pthread_t and all pthread_key_t on which
475:    *              it called pthread_setspecific.
480:    *      nextThread
481:    *              The pthread_key_t->threads attribute is the head of
484:    *              relationship between a pthread_key_t and all the 
485:    *              PThreads that have called pthread_setspecific for
486:    *              this pthread_key_t.
488:    *      prevThread
497:    *              pthread_setspecific if the user provided a
503:   pthread_key_t key;
504:   ThreadKeyAssoc *nextKey;
506:   ThreadKeyAssoc *prevKey;
571: /* Declared in pthread_cancel.c */
580: extern pthread_key_t ptw32_selfThreadKey;
581: extern pthread_key_t ptw32_cleanupKey;
582: extern pthread_cond_t ptw32_cond_list_head;
583: extern pthread_cond_t ptw32_cond_list_tail;
587: extern unsigned __int64 ptw32_threadSeqNumber;
601: extern int pthread_count;
617:   int ptw32_is_attr (const pthread_attr_t * attr);
619:   int ptw32_cond_check_need_init (pthread_cond_t * cond);
620:   int ptw32_mutex_check_need_init (pthread_mutex_t * mutex);
621:   int ptw32_rwlock_check_need_init (pthread_rwlock_t * rwlock);
622:   int ptw32_spinlock_check_need_init (pthread_spinlock_t * lock);
624:   int ptw32_robust_mutex_inherit(pthread_mutex_t * mutex);
625:   void ptw32_robust_mutex_add(pthread_mutex_t* mutex, pthread_t self);
630: 			       HANDLE threadH, DWORD callback_arg);
636:   void ptw32_threadDestroy (pthread_t tid);
640:   pthread_t ptw32_new (void);
642:   pthread_t ptw32_threadReusePop (void);
657:     ptw32_threadStart (void *vthreadParms);
663:   void ptw32_tkAssocDestroy (ThreadKeyAssoc * assoc);
714:   _CRTIMP unsigned long __cdecl _beginthread (void (__cdecl *) (void *),
716:   _CRTIMP void __cdecl _endthread (void);
937: #if defined(NEED_CREATETHREAD)
940:  * Macro uses args so we can cast start_proc to LPTHREAD_START_ROUTINE
950:         CreateThread(security, \
952:                      (LPTHREAD_START_ROUTINE) start_proc, \
959: #endif				/* NEED_CREATETHREAD */
12:  *      Copyright(C) 1999,2005 Pthreads-win32 contributors
20:  *      http://sources.redhat.com/pthreads-win32/contributors.html
131:    * state is greater or equal "PThreadStateRunning".
161:   volatile PThreadState state;
483:    *              nextThreads link. This chain provides the 1 to many
717:   _CRTIMP unsigned long __cdecl _beginthreadex (void *, unsigned,
720:   _CRTIMP void __cdecl _endthreadex (unsigned);
chromium.googlesource.com/chromium/src:native_client_sdk/src/libraries/third_party/pthreads-win32/implement.h: [ master, ] Duplicate result
chromium.googlesource.com/arc/arc:third_party/chromium-ppapi/native_client_sdk/src/libraries/third_party/pthreads-win32/implement.h: [ master, ] Duplicate result
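The ThreadKeyAssoc comments in implement.h above describe the per-thread, per-key associations pthreads-win32 keeps so that the destructor registered for a key runs when a thread that stored a value exits (or when the key is deleted). A minimal sketch of the user-visible side of that machinery, using only standard POSIX calls rather than this library's internals:

```c
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_key_t buf_key;

/* Key destructor: run on thread exit for every thread that stored a
 * non-NULL value under buf_key -- the per-thread/per-key association
 * the ThreadKeyAssoc comments above are managing. */
static void buf_destructor(void *value) {
    free(value);
}

static void *worker(void *name) {
    char *buf = malloc(64);
    snprintf(buf, 64, "hello from %s", (const char *)name);
    pthread_setspecific(buf_key, buf);               /* creates the association */
    puts((const char *)pthread_getspecific(buf_key));
    return NULL;                                     /* buf_destructor(buf) runs now */
}

int main(void) {
    pthread_t a, b;
    pthread_key_create(&buf_key, buf_destructor);
    pthread_create(&a, NULL, worker, "A");
    pthread_create(&b, NULL, worker, "B");
    pthread_join(a, NULL);
    pthread_join(b, NULL);
    pthread_key_delete(buf_key);
    return 0;
}
```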
github.com/google/grumpy:third_party/stdlib/threading.py: [ master, ]
631: class Thread(_Verbose):
35: _start_new_thread = thread.start_new_thread
1148: current_thread = currentThread
38: ThreadError = thread.error
1071: class _MainThread(Thread):
1096: def _pickSomeNonDaemonThread():
1111: class _DummyThread(Thread):
1135: def currentThread():
1365:     class ProducerThread(Thread):
1381:     class ConsumerThread(Thread):
1: """Thread module emulating a subset of Java's threading model."""
6:     import thread
31:            'current_thread', 'enumerate', 'Event',
32:            'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Thread',
36: _allocate_lock = thread.allocate_lock
37: _get_ident = thread.get_ident
39: del thread
67:                 # Issue #4188: calling current_thread() can incur an infinite
73:                     name = "<OS thread %d>" % ident
93:     The func will be passed to sys.setprofile() for each thread, before its
103:     The func will be passed to sys.settrace() for each thread, before its run()
117:     A reentrant lock must be released by the thread that acquired it. Once a
118:     thread has acquired a reentrant lock, the same thread may acquire it again
119:     without blocking; the thread must release it once for each time it has
126:     """A reentrant lock must be released by the thread that acquired it. Once a
127:        thread has acquired a reentrant lock, the same thread may acquire it
128:        again without blocking; the thread must release it once for each time it
150:         When invoked without arguments: if this thread already owns the lock,
152:         if another thread owns the lock, block until the lock is unlocked. Once
153:         the lock is unlocked (not owned by any thread), then grab ownership, set
154:         the recursion level to one, and return. If more than one thread is
191:         by any thread), and if any other threads are blocked waiting for the
194:         locked and owned by the calling thread.
196:         Only call this method when the calling thread owns the lock. A
246:     notified by another thread.
257:        notified by another thread.
301:         # Return True if lock is owned by current_thread.
312:         If the calling thread has not acquired the lock when this method is
317:         variable in another thread, or until the optional timeout occurs. Once
376:         If the calling thread has not acquired the lock when this method is
403:         If the calling thread has not acquired the lock when this method
445:         on entry, block, waiting until some other thread has called release() to
482:         When the counter is zero on entry and another thread is waiting for it
483:         to become larger than zero again, wake up that thread.
528:         When the counter is zero on entry and another thread is waiting for it
529:         to become larger than zero again, wake up that thread.
567:         # private!  called by Thread._reset_internal_locks by _after_fork()
601:         block until another thread calls set() to set the flag to true, or until
617: # Helper to generate new thread names
619: _counter() # Consume 0 so first non-main thread has id 1.
620: def _newname(template="Thread-%d"):
623: # Active thread administration
625: _active = {}    # maps thread id to Thread object
632:     """A class that represents a thread of control.
649:         *name* is the thread name. By default, a unique name is constructed of
650:         the form "Thread-N" where N is a small decimal number.
658:         the base class constructor (Thread.__init__()) before doing anything
659:         else to the thread.
694:         return current_thread().daemon
697:         assert self.__initialized, "Thread.__init__() was not called"
710:         """Start the thread's activity.
712:         It must be called at most once per thread object. It arranges for the
713:         object's run() method to be invoked in a separate thread of control.
716:         same thread object.
720:             raise RuntimeError("thread.__init__() not called")
724:             self._note("%s.start(): starting thread", self)
728:             _start_new_thread(self.__bootstrap, ())
736:         """Method representing the thread's activity.
748:             # Avoid a refcycle if the thread is running a function with
749:             # an argument that has a member that points to the thread.
755:         # happen when a daemon thread wakes up at an unfortunate
783:                 self._note("%s.__bootstrap(): thread started", self)
805:                     print>>_sys.stderr, ("Exception in thread %s:\n%s" %
814:                             "Exception in thread " + self.name +
860:         "Remove current thread from the dict of currently running threads."
862:         # Notes about running with dummy_thread:
864:         # Must take care to not raise an exception if dummy_thread is being
866:         # dummy_threading).  dummy_thread.get_ident() always returns -1 since
867:         # there is only one thread if dummy_thread is being used.  Thus
868:         # len(_active) is always <= 1 here, and any Thread instance created
869:         # overwrites the (if any) thread currently registered in _active.
872:         # gets overwritten the instant an instance of Thread is created; both
873:         # threads return -1 from dummy_thread.get_ident() and thus have the
876:         # it gets a KeyError if another Thread instance was created.
888:                 # could try to acquire the lock again in the same thread, (in
889:                 # current_thread()), and would block.
895:         """Wait until the thread terminates.
897:         This blocks the calling thread until the thread whose join() method is
905:         thread is still alive, the join() call timed out.
908:         block until the thread terminates.
910:         A thread can be join()ed many times.
913:         thread as that would cause a deadlock. It is also an error to join() a
914:         thread before it has been started and attempts to do so raises the same
919:             raise RuntimeError("Thread.__init__() not called")
921:             raise RuntimeError("cannot join thread before it is started")
922:         if self is current_thread():
923:             raise RuntimeError("cannot join current thread")
927:                 self._note("%s.join(): waiting until thread stops", self)
934:                     self._note("%s.join(): thread stopped", self)
946:                         self._note("%s.join(): thread stopped", self)
957:         assert self.__initialized, "Thread.__init__() not called"
961:         assert self.__initialized, "Thread.__init__() not called"
968:         """Thread identifier of this thread or None if it has not been started.
970:         This is a nonzero integer. See the thread.get_ident() function. Thread
971:         identifiers may be recycled when a thread exits and another thread is
972:         created. The identifier is available even after the thread has exited.
975:         assert self.__initialized, "Thread.__init__() not called"
979:         """Return whether the thread is alive.
986:         assert self.__initialized, "Thread.__init__() not called"
992:         """A boolean value indicating whether this thread is a daemon thread (True) or not (False).
995:         raised. Its initial value is inherited from the creating thread; the
996:         main thread is not a daemon thread and therefore all threads created in
997:         the main thread default to daemon = False.
1003:         assert self.__initialized, "Thread.__init__() not called"
1008:             raise RuntimeError("Thread.__init__() not called")
1010:             raise RuntimeError("cannot set daemon status of active thread");
1041: class _Timer(Thread):
1051:         Thread.__init__(self)
1068: # Special thread class to represent the main thread
1074:         Thread.__init__(self, name="MainThread")
1103: # Dummy thread class to represent threads not started here.
1105: # If they invoke anything in threading.py that calls current_thread(), they
1107: # Their purpose is to return *something* from current_thread().
1114:         Thread.__init__(self, name=_newname("Dummy-%d"))
1116:         # Thread.__block consumes an OS-level locking primitive, which
1130:         assert False, "cannot join a dummy thread"
1136:     """Return the current Thread object, corresponding to the caller's thread of control.
1138:     If the caller's thread of control was not created through the threading
1139:     module, a dummy thread object with limited functionality is returned.
1145:         ##print "current_thread(): no current thread for", _get_ident()
1151:     """Return the number of Thread objects currently alive.
1167:     """Return a list of all Thread objects currently alive.
1169:     The list includes daemonic threads, dummy thread objects created by
1170:     current_thread(), and the main thread. It excludes terminated threads and
1177: from thread import stack_size
1179: # Create the main thread object,
1185: # get thread-local implementation, either from the thread
1188: # NOTE: Thread local classes follow: the Grumpy version of this file copies
1196:         key = '_local__key', 'thread.local.' + str(id(self))
1204:         # We need to create the thread dict in anticipation of
1208:         current_thread().__dict__[key] = dict
1214:     d = current_thread().__dict__.get(key)
1217:         current_thread().__dict__[key] = d
1279:         for thread in threads:
1281:                 __dict__ = thread.__dict__
1283:                 # Thread is dying, rest in peace.
1290:                     pass # didn't have anything in this thread
1300:     # by another (non-forked) thread.  http://bugs.python.org/issue874900
1304:     # fork() only copied the current thread; clear references to others.
1306:     current = current_thread()
1308:         for thread in _enumerate():
1311:             if hasattr(thread, '_reset_internal_locks'):
1312:                 thread._reset_internal_locks()
1313:             if thread is current:
1314:                 # There is only one active thread. We reset the ident to
1317:                 thread.__ident = ident
1318:                 new_active[ident] = thread
1321:                 thread.__stop()
1368:             Thread.__init__(self, name="Producer")
1384:             Thread.__init__(self, name="Consumer")
19: #  This threading model was originally inspired by Java, and inherited
29: # Rename some stuff so "from threading import *" is safe
30: __all__ = ['activeCount', 'active_count', 'Condition', 'currentThread',
45:                         module='threading', message='sys.exc_clear')
68:                 # recursion if it has to create a DummyThread on the fly.
91:     """Set a profile function for all threads started from the threading module.
101:     """Set a trace function for all threads started from the threading module.
245:     A condition variable allows one or more threads to wait until they are
256:     """Condition variables allow one or more threads to wait until they are
374:         """Wake up one or more threads waiting on this condition, if any.
379:         This method wakes up at most n of the threads waiting for the condition
380:         variable; it is a no-op if no threads are waiting.
401:         """Wake up all threads waiting on this condition.
449:         which blocked threads are awakened should not be relied on. There is no
579:         All threads waiting for the flag to become true are awakened. Threads
590:         Subsequently, threads calling wait() will block until set() is called to
629: # Main class for threads
643:         *group* should be None; reserved for future extension when a ThreadGroup
683:         if hasattr(self, '__block'):  # DummyThread deletes self.__block
693:         # Overridden in _MainThread and _DummyThread
722:             raise RuntimeError("threads can only be started once")
763:         # reported.  Also, we only suppress them for daemonic threads;
835:                 # test_threading.test_no_refcycle_through_target when
871:         # An instance of _MainThread is always created by 'threading'.  This
874:         # same key in the dict.  So when the _MainThread instance created by
875:         # 'threading' tries to clean itself up when atexit calls this method
879:         # _active if dummy_threading is being used is a red herring.  But
880:         # since it isn't if dummy_threading is *not* being used then don't
891:             if 'dummy_threading' not in _sys.modules:
953:         It has no semantics. Multiple threads may be given the same name. The
983:         returns a list of all alive threads.
999:         The entire Python program exits when no alive non-daemon threads are
1085:         t = _pickSomeNonDaemonThread()
1088:                 self._note("%s: waiting for other threads", self)
1091:             t = _pickSomeNonDaemonThread()
1108: # They are marked as daemon threads so we won't wait for them
1117:         # can never be used by a _DummyThread.  Since a _DummyThread
1146:         return _DummyThread()
1171:     threads that have not yet been started.
1181: # (Py_Main) as threading._shutdown.
1183: _shutdown = _MainThread()._exitfunc
1189: # these from _threading_local.py to avoid circular dependency issues.
1272:             threads = _enumerate()
1274:             # If enumerating the current threads fails, as it seems to do
1292: # END _threading_local.py copy
1296:     # is called from PyOS_AfterFork.  Here we cleanup threading module state
1401:         t = ProducerThread(Q, NI)
1404:     C = ConsumerThread(Q, NI*NP)
850:         # DummyThreads delete self.__block, but they have no waiters to
1295:     # This function is called by Python/ceval.c:PyEval_ReInitThreads which
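The RLock docstrings above (and the recursive_count field in the pthreads-win32 header earlier) describe a reentrant lock: the owning thread may acquire it again without blocking and must release it once per acquisition. For illustration only, the same behaviour with a POSIX recursive mutex (plain C, not the Python implementation shown here):

```c
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t rlock;

static void inner(void) {
    pthread_mutex_lock(&rlock);    /* same thread, second acquisition: does not block */
    puts("inner critical section");
    pthread_mutex_unlock(&rlock);  /* one unlock per lock */
}

static void outer(void) {
    pthread_mutex_lock(&rlock);
    inner();
    pthread_mutex_unlock(&rlock);  /* lock becomes free only after the last unlock */
}

int main(void) {
    pthread_mutexattr_t attr;
    pthread_mutexattr_init(&attr);
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
    pthread_mutex_init(&rlock, &attr);
    pthread_mutexattr_destroy(&attr);

    outer();

    pthread_mutex_destroy(&rlock);
    return 0;
}
```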
android.googlesource.com/trusty/lk/common:include/kernel/thread.h: [ master, ]
91: typedef struct thread {
47: #define THREAD_STATS 1
48: #define THREAD_STACK_HIGHWATER 1
51: enum thread_state {
52:     THREAD_SUSPENDED = 0,
53:     THREAD_READY,
54:     THREAD_RUNNING,
55:     THREAD_BLOCKED,
56:     THREAD_SLEEPING,
57:     THREAD_DEATH,
60: typedef int (*thread_start_routine)(void *arg);
63: enum thread_tls_list {
79: #define THREAD_FLAG_DETACHED                  (1U<<0)
80: #define THREAD_FLAG_FREE_STACK                (1U<<1)
81: #define THREAD_FLAG_FREE_STRUCT               (1U<<2)
82: #define THREAD_FLAG_REAL_TIME                 (1U<<3)
83: #define THREAD_FLAG_IDLE                      (1U<<4)
84: #define THREAD_FLAG_DEBUG_STACK_BOUNDS_CHECK  (1U<<5)
85: #define THREAD_FLAG_EXIT_ON_PANIC             (1U<<6)
86: #define THREAD_FLAG_FREE_SHADOW_STACK         (1U<<7)
87: #define THREAD_FLAG_FREE_LIBC_STATE           (1U<<8)
89: #define THREAD_MAGIC (0x74687264) // 'thrd'
102:     struct list_node thread_list_node;
137: } thread_t;
140: #define thread_curr_cpu(t) ((t)->curr_cpu)
141: #define thread_pinned_cpu(t) ((t)->pinned_cpu)
142: #define thread_set_curr_cpu(t,c) ((t)->curr_cpu = (c))
144: #define thread_curr_cpu(t) (0)
145: #define thread_pinned_cpu(t) (-1)
146: #define thread_set_curr_cpu(t,c) do {} while(0)
210: static inline void thread_sleep(lk_time_t delay_ms) {
236: static inline uint thread_lock_owner_get(void) {
240: static inline void thread_lock_complete(void) {
246: static inline void thread_unlock_prepare(void) {
253: #define THREAD_LOCK(state) \
258: #define THREAD_UNLOCK(state) \
262: static inline void thread_lock_ints_disabled(void) {
268: static inline void thread_unlock_ints_disabled(void) {
273: static inline bool thread_lock_held(void)
284: static inline __ALWAYS_INLINE uintptr_t thread_tls_get(thread_t *t, uint entry)
301: #define thread_tls_set(t,e,v) \
309: static inline void thread_set_flag(thread_t *t, uint flag, bool enable)
320: static inline bool thread_get_flag(thread_t *t, uint flag)
339: static inline void thread_set_flag_exit_on_panic(thread_t *t, bool enable)
344: static inline bool thread_get_flag_exit_on_panic(thread_t *t)
351: struct thread_stats {
369: #define THREAD_STATS_INC(name) do { thread_stats[arch_curr_cpu_num()].name++; } while(0)
373: #define THREAD_STATS_INC(name) do { } while (0)
24: #define __KERNEL_THREAD_H
294: static inline __ALWAYS_INLINE uintptr_t __thread_tls_set(thread_t *t,
23: #ifndef __KERNEL_THREAD_H
32: #include <arch/thread.h>
62: /* thread local storage */
107:     enum thread_state state;
123:     struct arch_thread arch;
126:     thread_start_routine entry;
133:     /* thread local storage */
149: /* thread priority */
176: void thread_init_early(void);
177: void thread_init(void);
178: void thread_become_idle(void) __NO_RETURN;
179: void thread_secondary_cpu_init_early(void);
180: void thread_secondary_cpu_entry(void) __NO_RETURN;
181: void thread_set_name(const char *name);
184:  * thread_set_priority() - set priority of current thread
185:  * @priority:      Priority for the current thread,
190:  *                 holding the thread lock.
192: void thread_set_priority(int priority);
195:  * thread_set_pinned_cpu() - Pin thread to a given CPU.
196:  * @t:             Thread to pin
197:  * @cpu:           cpu id on which to pin the thread
200:  *                 holding the thread lock.
202: void thread_set_pinned_cpu(thread_t* t, int cpu);
204: thread_t *thread_create(const char *name, thread_start_routine entry, void *arg, int priority, size_t stack_size);
205: thread_t *thread_create_etc(thread_t *t, const char *name, thread_start_routine entry, void *arg, int priority, void *stack, size_t stack_size, size_t shadow_stack_s...(5 bytes skipped)...
206: status_t thread_resume(thread_t *);
207: void thread_exit(int retcode) __NO_RETURN;
208: void thread_sleep_ns(lk_time_ns_t delay_ns);
209: void thread_sleep_until_ns(lk_time_ns_t target_time_ns);
211:     thread_sleep_ns(delay_ms * 1000ULL * 1000);
213: status_t thread_detach(thread_t *t);
214: status_t thread_join(thread_t *t, int *retcode, lk_time_t timeout);
215: status_t thread_detach_and_resume(thread_t *t);
216: status_t thread_set_real_time(thread_t *t);
218: void dump_thread(thread_t *t);
219: void arch_dump_thread(thread_t *t);
223: void thread_yield(void); /* give up the cpu voluntarily */
224: void thread_preempt(void); /* get preempted (inserted into head of run queue) */
225: void thread_block(void); /* block on something and reschedule */
226: void thread_unblock(thread_t *t, bool resched); /* go back in the run queue */
228: /* the current thread */
229: thread_t *get_current_thread(void);
230: void set_current_thread(thread_t *);
233: extern spin_lock_t thread_lock;
234: extern atomic_uint thread_lock_owner;
237:     return atomic_load_explicit(&thread_lock_owner, memory_order_relaxed);
241:     DEBUG_ASSERT(thread_lock_owner_get() == SMP_MAX_CPUS);
242:     atomic_store_explicit(&thread_lock_owner, arch_curr_cpu_num(),
248:     DEBUG_ASSERT(thread_lock_owner_get() == arch_curr_cpu_num());
249:     atomic_store_explicit(&thread_lock_owner, (uint)SMP_MAX_CPUS,
255:     spin_lock_irqsave(&thread_lock, state); \
256:     thread_lock_complete()
259:     thread_unlock_prepare(); \
260:     spin_unlock_irqrestore(&thread_lock, state)
264:     spin_lock(&thread_lock);
265:     thread_lock_complete();
269:     thread_unlock_prepare();
270:     spin_unlock(&thread_lock);
278:     ret = thread_lock_owner_get() == arch_curr_cpu_num();
283: /* thread local storage */
291:     return thread_tls_get(get_current_thread(), entry);
304:         __thread_tls_set(t, e, v); \
307: #define tls_set(e,v) thread_tls_set(get_current_thread(), e, v)
311:     THREAD_LOCK(state);
317:     THREAD_UNLOCK(state);
323:     THREAD_LOCK(state);
325:     THREAD_UNLOCK(state);
330:  * thread_set_flag_exit_on_panic - Set flag to ignore panic in specific thread
331:  * @t:       Thread to set flag on
332:  * @enable:  If %true, exit thread instead of halting system if panic is called
341:     thread_set_flag(t, THREAD_FLAG_EXIT_ON_PANIC, enable);
346:     return thread_get_flag(t, THREAD_FLAG_EXIT_ON_PANIC);
349: /* thread level statistics */
350: #if THREAD_STATS
367: extern struct thread_stats thread_stats[SMP_MAX_CPUS];
220: void dump_all_threads(void);
336:  * Should only be used for kernel test threads as it is generally not safe to
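The declarations above outline LK's kernel threading API: threads are created suspended and then resumed, detached, or joined. A minimal usage sketch, assuming the LK/Trusty kernel build environment that provides this header; the priority and stack-size values are illustrative placeholders, not values from the repository:

```c
#include <kernel/thread.h>

/* Entry point matching thread_start_routine: int (*)(void *arg). */
static int worker(void *arg) {
    (void)arg;
    /* ... do work ... */
    return 0;
}

static void start_worker(void) {
    /* Placeholder priority (16) and stack size (4096). */
    thread_t *t = thread_create("worker", worker, NULL, 16, 4096);
    if (!t)
        return;
    /* Detach so the kernel reclaims the thread when it exits, then let it run. */
    thread_detach_and_resume(t);
}
```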
android.googlesource.com/platform/external/jazzer-api:agent/src/jmh/java/com/code_intelligence/jazzer/runtime/FuzzerCallbacksBenchmark.java: [ master, ]
173:     String needle;
179:       needle = randomString(length, asciiOnly);
199:     FuzzerCallbacks.traceStrstr(state.haystack, state.needle, state.pc);
204:     FuzzerCallbacksOptimizedNonCritical.traceStrstr(state.haystack, state.needle, state.pc);
211:     FuzzerCallbacksOptimizedCritical.traceStrstrJava(state.haystack, state.needle, state.pc);
217:     FuzzerCallbacksOptimizedNonCritical.traceStrstrJava(state.haystack, state.needle, state.pc);
20: import java.util.concurrent.ThreadLocalRandom;
83:       Random random = ThreadLocalRandom.current();
142:       Random random = ThreadLocalRandom.current();
184:           ThreadLocalRandom.current()
android.googlesource.com/platform/external/starlark-go:starlark/eval.go: [ master, ]
30: type Thread struct {
27: // A Thread contains the state of a Starlark thread,
28: // such as its call stack and thread-local storage.
29: // The Thread is threaded throughout the evaluator.
31: 	// Name is an optional name that describes the thread, for debugging.
40: 	Print func(thread *Thread, msg string)
48: 	Load func(thread *Thread, module string) (StringDict, error)
50: 	// steps counts abstract computation steps executed by this thread.
56: 	// locals holds arbitrary "thread-local" Go values belonging to the client.
65: // by this thread. It is incremented by the interpreter. It may be used
70: func (thread *Thread) ExecutionSteps() uint64 {
71: 	return thread.steps
75: // computation steps that may be executed by this thread. If the
76: // thread's step counter exceeds this limit, the interpreter calls
77: // thread.Cancel("too many steps").
78: func (thread *Thread) SetMaxExecutionSteps(max uint64) {
79: 	thread.maxSteps = max
82: // Cancel causes execution of Starlark code in the specified thread to
85: // if the thread is currently in a call to a built-in function.
89: // Unlike most methods of Thread, it is safe to call Cancel from any
90: // goroutine, even if the thread is actively executing.
91: func (thread *Thread) Cancel(reason string) {
93: 	atomic.CompareAndSwapPointer((*unsafe.Pointer)(unsafe.Pointer(&thread.cancelReason)), nil, unsafe.Pointer(&reason))
96: // SetLocal sets the thread-local value associated with the specified key.
98: func (thread *Thread) SetLocal(key string, value interface{}) {
99: 	if thread.locals == nil {
100: 		thread.locals = make(map[string]interface{})
102: 	thread.locals[key] = value
105: // Local returns the thread-local value associated with the specified key.
106: func (thread *Thread) Local(key string) interface{} {
107: 	return thread.locals[key]
115: func (thread *Thread) CallFrame(depth int) CallFrame {
116: 	return thread.frameAt(depth).asCallFrame()
119: func (thread *Thread) frameAt(depth int) *frame {
120: 	return thread.stack[len(thread.stack)-1-depth]
123: // CallStack returns a new slice containing the thread's stack of call frames.
124: func (thread *Thread) CallStack() CallStack {
125: 	frames := make([]CallFrame, len(thread.stack))
126: 	for i, fr := range thread.stack {
133: func (thread *Thread) CallStackDepth() int { return len(thread.stack) }
230: // a copy of the thread's stack at the moment of the error.
251: func (thread *Thread) evalError(err error) *EvalError {
254: 		CallStack: thread.CallStack(),
318: // Thread is the state associated with the Starlark thread.
332: func ExecFile(thread *Thread, filename string, src interface{}, predeclared StringDict) (StringDict, error) {
339: 	g, err := mod.Init(thread, predeclared)
408: func (prog *Program) Init(thread *Thread, predeclared StringDict) (StringDict, error) {
411: 	_, err := Call(thread, toplevel, nil, nil)
418: // ExecREPLChunk compiles and executes file f in the specified thread
426: func ExecREPLChunk(f *syntax.File, thread *Thread, globals StringDict) error {
457: 	_, err := Call(thread, toplevel, nil, nil)
512: func Eval(thread *Thread, filename string, src interface{}, env StringDict) (Value, error) {
521: 	return Call(thread, f, nil, nil)
536: func EvalExpr(thread *Thread, expr syntax.Expr, env StringDict) (Value, error) {
541: 	return Call(thread, fn, nil, nil)
1002: 			needle, ok := x.(String)
1006: 			return Bool(strings.Contains(string(y), string(needle))), nil
1008: 			switch needle := x.(type) {
1010: 				return Bool(strings.Contains(string(y), string(needle))), nil
1013: 				if err := AsInt(needle, &b); err != nil {
1188: func Call(thread *Thread, fn Value, args Tuple, kwargs []Tuple) (Value, error) {
1196: 	// Optimization: use slack portion of thread.stack
1198: 	if n := len(thread.stack); n < cap(thread.stack) {
1199: 		fr = thread.stack[n : n+1][0]
1205: 	if thread.stack == nil {
1206: 		// one-time initialization of thread
1207: 		if thread.maxSteps == 0 {
1208: 			thread.maxSteps-- // (MaxUint64)
1212: 	thread.stack = append(thread.stack, fr) // push
1216: 	thread.beginProfSpan()
1217: 	result, err := c.CallInternal(thread, args, kwargs)
1218: 	thread.endProfSpan()
1228: 			err = thread.evalError(err)
1233: 	thread.stack = thread.stack[:len(thread.stack)-1] // pop
android.googlesource.com/platform/external/rust/crates/crossbeam-utils:src/thread.rs: [ master, ]
487:     thread: thread::Thread,
545:     pub fn thread(&self) -> &thread::Thread {
319: pub struct ScopedThreadBuilder<'scope, 'env> {
324: impl<'scope, 'env> ScopedThreadBuilder<'scope, 'env> {
6: //! use crossbeam_utils::thread;
14: //! thread::scope(|s| {
28: //! use std::thread;
39: //!     threads.push(thread::spawn(move || {
44: //! for thread in threads {
45: //!     thread.join().unwrap();
72: //! If a variable is borrowed by a thread, the thread must complete before the variable is
73: //! destroyed. Threads spawned using [`std::thread::spawn`] can only borrow variables with the
74: //! `'static` lifetime because the borrow checker cannot be sure when the thread will complete.
77: //! scope. Whenever a scope spawns a thread, it promises to join the thread before the scope ends.
84: //! tricky because argument `s` lives *inside* the invocation of `thread::scope()` and as such
88: //! use crossbeam_utils::thread;
90: //! thread::scope(|s| {
94: //!         s.spawn(|_| println!("nested thread"));
99: //! Fortunately, there is a solution. Every scoped thread is passed a reference to its scope as an
103: //! use crossbeam_utils::thread;
105: //! thread::scope(|s| {
109: //!         s.spawn(|_| println!("nested thread"));
120: use std::thread;
136: ...(2 bytes skipped).../ **Note:** Since Rust 1.63, this function is soft-deprecated in favor of the more efficient [`std::thread::scope`].
141: /// use crossbeam_utils::thread;
145: /// thread::scope(|s| {
147: ///         println!("A child thread borrowing `var`: {:?}", var);
151: pub fn scope<'env, F, R>(f: F) -> thread::Result<R>
197:     /// The list of the thread join handles.
198:     handles: SharedVec<SharedOption<thread::JoinHandle<()>>>,
210:     /// Spawns a scoped thread.
213:     /// is that this thread is scoped, meaning it's guaranteed to terminate before the scope exits,
216:     /// The scoped thread is passed a reference to this scope as an argument, which can be used for
220:     /// [join](ScopedJoinHandle::join) the thread before the scope exits.
222:     /// This will create a thread using default parameters of [`ScopedThreadBuilder`], if you want to specify the
223:     /// stack size or the name of the thread, use this API instead.
225:     /// [`spawn`]: std::thread::spawn
229:     /// Panics if the OS fails to create a thread; use [`ScopedThreadBuilder::spawn`]
235:     /// use crossbeam_utils::thread;
237:     /// thread::scope(|s| {
239:     ///         println!("A child thread is running");
243:     ///     // Join the thread and retrieve its result.
256:             .expect("failed to spawn scoped thread")
259:     /// Creates a builder that can configure a thread before spawning.
264:     /// use crossbeam_utils::thread;
266:     /// thread::scope(|s| {
268:     ///         .spawn(|_| println!("A child thread is running"))
275:             builder: thread::Builder::new(),
286: /// Configures the properties of a new thread.
290: /// - [`name`]: Specifies an [associated name for the thread][naming-threads].
291: /// - [`stack_size`]: Specifies the [desired stack size for the thread][stack-size].
294: /// thread handle with the given configuration.
298: /// thread.
303: /// use crossbeam_utils::thread;
305: /// thread::scope(|s| {
307: ///         .spawn(|_| println!("Running a child thread"))
316: /// [naming-threads]: std::thread#naming-threads
317: /// [stack-size]: std::thread#stack-size
321:     builder: thread::Builder,
325:     /// Sets the name for the new thread.
334:     /// use crossbeam_utils::thread;
335:     /// use std::thread::current;
337:     /// thread::scope(|s| {
339:     ///         .name("my thread".to_string())
340:     ///         .spawn(|_| assert_eq!(current().name(), Some("my thread")))
345:     /// [naming-threads]: std::thread#naming-threads
351:     /// Sets the size of the stack for the new thread.
360:     /// use crossbeam_utils::thread;
362:     /// thread::scope(|s| {
365:     ///         .spawn(|_| println!("Running a child thread"))
370:     /// [stack-size]: std::thread#stack-size
376:     /// Spawns a scoped thread with this configuration.
378:     /// The scoped thread is passed a reference to this scope as an argument, which can be used for
381:     /// The returned handle can be used to manually join the thread before the scope exits.
386:     /// [`io::Result`] to capture any failure to create the thread at
393:     /// Panics if a thread name was set and it contained null bytes.
398:     /// use crossbeam_utils::thread;
400:     /// thread::scope(|s| {
403:     ///             println!("A child thread is running");
408:     ///     // Join the thread and retrieve its result.
422:         // Spawn the thread and grab its join handle and thread handle.
423:         let (handle, thread) = {
426:             // A clone of the scope that will be moved into the new thread.
433:             // Spawn the thread.
455:             let thread = handle.thread().clone();
457:             (handle, thread)
466:             thread,
475: /// A handle that can be used to join its scoped thread.
480:     /// A join handle to the spawned thread.
481:     handle: SharedOption<thread::JoinHandle<()>>,
486:     /// A handle to the the spawned thread.
494:     /// Waits for the thread to finish and returns its result.
496:     /// If the child thread panics, an error is returned. Note that if panics are implemented by
501:     /// This function may panic on some platforms if a thread attempts to join itself or otherwise
507:     /// use crossbeam_utils::thread;
509:     /// thread::scope(|s| {
510:     ///     let handle1 = s.spawn(|_| println!("I'm a happy thread :)"));
511:     ///     let handle2 = s.spawn(|_| panic!("I'm a sad thread :("));
513:     ///     // Join the first thread and verify that it succeeded.
517:     ///     // Join the second thread and verify that it panicked.
522:     pub fn join(self) -> thread::Result<T> {
527:         // Join the thread and then take the result out of its inner closure.
533:     /// Returns a handle to the underlying thread.
538:     /// use crossbeam_utils::thread;
540:     /// thread::scope(|s| {
541:     ///     let handle = s.spawn(|_| println!("A child thread is running"));
542:     ///     println!("The child thread ID: {:?}", handle.thread().id());
546:         &self.thread
552:         use std::os::unix::thread::{JoinHandleExt, RawPthread};
1: //! Threads that can borrow variables from the stack.
3: //! Create a scope when spawned threads need to access variables on the stack:
23: //! # Why scoped threads?
25: //! Suppose we wanted to re-write the previous example using plain threads:
36: //! let mut threads = Vec::new();
64: //! The problem here is that spawned threads are not allowed to borrow variables on stack because
67: //! Scoped threads are a mechanism to guarantee to the compiler that spawned threads will be joined
70: //! # How scoped threads work
76: //! A scope creates a clear boundary between variables outside the scope and threads inside the
78: //! This way we guarantee to the borrow checker that scoped threads only live within the scope and
81: //! # Nesting scoped threads
83: //! Sometimes scoped threads need to spawn more threads within the same scope. This is a little
85: //! cannot be borrowed by scoped threads:
100: //! argument, which can be used for spawning nested threads:
128: /// Creates a new scope for spawning threads.
130: /// All child threads that haven't been manually joined will be automatically joined just before
131: /// this function invocation ends. If all joined threads have successfully completed, `Ok` is
132: /// returned with the return value of `f`. If any of the joined threads has panicked, an `Err` is
133: /// returned containing errors from panicked threads. Note that if panics are implemented by
169:     // Join all remaining spawned threads.
181:     // If any of the child threads have panicked, return the panic errors.
195: /// A scope for spawning threads.
217:     /// spawning nested threads.
329:     /// For more information about named threads, see [here][naming-threads].
355:     /// For more information about the stack size for threads, see [here][stack-size].
379:     /// spawning nested threads.
502:     /// may create a deadlock with joining threads.
524:         // for nested scopes before joining remaining threads.
555:             fn as_pthread_t(&self) -> RawPthread {
557:                 // for nested scopes before joining remaining threads.
559:                 handle.as_ref().unwrap().as_pthread_t()
561:             fn into_pthread_t(self) -> RawPthread {
562:                 self.as_pthread_t()
571:                 // for nested scopes before joining remaining threads.
272:     pub fn builder<'scope>(&'scope self) -> ScopedThreadBuilder<'scope, 'env> {
273:         ScopedThreadBuilder {
312: /// [`name`]: ScopedThreadBuilder::name
313: /// [`stack_size`]: ScopedThreadBuilder::stack_size
314: /// [`spawn`]: ScopedThreadBuilder::spawn
346:     pub fn name(mut self, name: String) -> ScopedThreadBuilder<'scope, 'env> {
371:     pub fn stack_size(mut self, size: usize) -> ScopedThreadBuilder<'scope, 'env> {
478: /// [`ScopedThreadBuilder::spawn`] method.
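The crossbeam documentation excerpted above explains why scoped threads exist: spawned threads may borrow stack variables only if they are guaranteed to be joined before that data goes out of scope. The manual equivalent of that guarantee, written in C with pthreads purely for illustration (not crossbeam's API): the workers borrow a stack array and are all joined before the function returns.

```c
#include <pthread.h>
#include <stdio.h>

struct slice { const int *data; int len; long sum; };

static void *sum_worker(void *arg) {
    struct slice *s = arg;            /* borrows data living on main's stack */
    s->sum = 0;
    for (int i = 0; i < s->len; i++)
        s->sum += s->data[i];
    return NULL;
}

int main(void) {
    int numbers[8] = {1, 2, 3, 4, 5, 6, 7, 8};   /* stack data being "borrowed" */
    struct slice halves[2] = {
        { numbers,     4, 0 },
        { numbers + 4, 4, 0 },
    };
    pthread_t tids[2];

    for (int i = 0; i < 2; i++)
        pthread_create(&tids[i], NULL, sum_worker, &halves[i]);

    /* Join before `numbers` goes out of scope -- the invariant a scope enforces. */
    for (int i = 0; i < 2; i++)
        pthread_join(tids[i], NULL);

    printf("sum = %ld\n", halves[0].sum + halves[1].sum);
    return 0;
}
```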
github.com/google/skylark:eval.go: [ master, ]
29: type Thread struct {
26: // A Thread contains the state of a Skylark thread,
27: // such as its call stack and thread-local storage.
28: // The Thread is threaded throughout the evaluator.
36: 	Print func(thread *Thread, msg string)
44: 	Load func(thread *Thread, module string) (StringDict, error)
46: 	// locals holds arbitrary "thread-local" Go values belonging to the client.
51: // SetLocal sets the thread-local value associated with the specified key.
53: func (thread *Thread) SetLocal(key string, value interface{}) {
54: 	if thread.locals == nil {
55: 		thread.locals = make(map[string]interface{})
57: 	thread.locals[key] = value
60: // Local returns the thread-local value associated with the specified key.
61: func (thread *Thread) Local(key string) interface{} {
62: 	return thread.locals[key]
67: func (thread *Thread) Caller() *Frame { return thread.frame.parent }
70: func (thread *Thread) TopFrame() *Frame { return thread.frame }
117: // The Frames of a thread are structured as a spaghetti stack, not a
217: // Thread is the state associated with the Skylark thread.
231: func ExecFile(thread *Thread, filename string, src interface{}, predeclared StringDict) (StringDict, error) {
238: 	g, err := mod.Init(thread, predeclared)
280: func (prog *Program) Init(thread *Thread, predeclared StringDict) (StringDict, error) {
283: 	_, err := Call(thread, toplevel, nil, nil)
328: func Eval(thread *Thread, filename string, src interface{}, env StringDict) (Value, error) {
341: 	return Call(thread, fn, nil, nil)
724: 			needle, ok := x.(String)
728: 			return Bool(strings.Contains(string(y), string(needle))), nil
850: func Call(thread *Thread, fn Value, args Tuple, kwargs []Tuple) (Value, error) {
856: 	thread.frame = &Frame{parent: thread.frame, callable: c}
857: 	result, err := c.CallInternal(thread, args, kwargs)
858: 	thread.frame = thread.frame.parent
android.googlesource.com/platform/system/nfc:src/adaptation/NfcAdaptation.cc: [ master, ]
665: uint32_t NfcAdaptation::Thread(__attribute__((unused)) uint32_t arg) {
1086: ThreadMutex::ThreadMutex() {
1136: ThreadCondVar::ThreadCondVar() {
1103: ThreadMutex::~ThreadMutex() { pthread_mutex_destroy(&mMutex); }
1154: ThreadCondVar::~ThreadCondVar() { pthread_cond_destroy(&mCondVar); }
1193: AutoThreadMutex::AutoThreadMutex(ThreadMutex& m) : mm(m) { mm.lock(); }
1204: AutoThreadMutex::~AutoThreadMutex() { mm.unlock(); }
470:   // Android already logs thread_id, proc_id, timestamp, so disable those.
552:     GKI_create_task((TASKPTR)Thread, MMI_TASK, (int8_t*)"NFCA_THREAD", nullptr, 0,
632: ** Description: signal the CondVar to release the thread that is waiting
658: ** Function:    NfcAdaptation::Thread()
666:   const char* func = "NfcAdaptation::Thread";
90: ThreadMutex NfcAdaptation::sLock;
91: ThreadCondVar NfcAdaptation::mHalOpenCompletedEvent;
92: ThreadCondVar NfcAdaptation::mHalCloseCompletedEvent;
487:     // of a byte array is ambiguous and needlessly difficult to configure.
549:                   (pthread_cond_t*)nullptr, nullptr);
553:                     (pthread_cond_t*)nullptr, nullptr);
660: ** Description: Creates work threads
670:     ThreadCondVar CondVar;
673:                     (pthread_cond_t*)CondVar, (pthread_mutex_t*)CondVar);
1079: ** Function:    ThreadMutex::ThreadMutex()
1087:   pthread_mutexattr_t mutexAttr;
1089:   pthread_mutexattr_init(&mutexAttr);
1090:   pthread_mutex_init(&mMutex, &mutexAttr);
1091:   pthread_mutexattr_destroy(&mutexAttr);
1096: ** Function:    ThreadMutex::~ThreadMutex()
1107: ** Function:    ThreadMutex::lock()
1114: void ThreadMutex::lock() { pthread_mutex_lock(&mMutex); }
1118: ** Function:    ThreadMutex::unblock()
1125: void ThreadMutex::unlock() { pthread_mutex_unlock(&mMutex); }
1129: ** Function:    ThreadCondVar::ThreadCondVar()
1137:   pthread_condattr_t CondAttr;
1139:   pthread_condattr_init(&CondAttr);
1140:   pthread_cond_init(&mCondVar, &CondAttr);
1142:   pthread_condattr_destroy(&CondAttr);
1147: ** Function:    ThreadCondVar::~ThreadCondVar()
1158: ** Function:    ThreadCondVar::wait()
1165: void ThreadCondVar::wait() {
1166:   pthread_cond_wait(&mCondVar, *this);
1167:   pthread_mutex_unlock(*this);
1172: ** Function:    ThreadCondVar::signal()
1179: void ThreadCondVar::signal() {
1181:   pthread_cond_signal(&mCondVar);
312:   AutoThreadMutex a(sLock);
551:     AutoThreadMutex guard(mCondVar);
572:   AutoThreadMutex a(sLock);
671:     AutoThreadMutex guard(CondVar);
1180:   AutoThreadMutex a(*this);
1186: ** Function:    AutoThreadMutex::AutoThreadMutex()
1197: ** Function:    AutoThreadMutex::~AutoThreadMutex()
android.googlesource.com/platform/external/rust/crates/syn:src/lib.rs: [ master, ]
798: mod thread;
139: //!     bad: std::thread::Thread,
149: //! error[E0277]: the trait bound `std::thread::Thread: HeapSize` is not satisfied
152: //! 7 |     bad: std::thread::Thread,
153: //!   |     ^^^^^^^^^^^^^^^^^^^^^^^^ the trait `HeapSize` is not implemented for `Thread`
278:     clippy::needless_doctest_main,
279:     clippy::needless_pass_by_value,
android.googlesource.com/platform/tools/netsim:rust/netsim-cxx/src/http_server/thread_pool.rs: [ master, ]
77:     thread: Option<thread::JoinHandle<()>>,
20: pub struct ThreadPool {
27: impl ThreadPool {
61: impl Drop for ThreadPool {
17:     thread,
68:             if let Some(thread) = worker.thread.take() {
69:                 thread.join().unwrap();
82:         let thread = thread::spawn(move || loop {
96:         Worker { id, thread: Some(thread) }
28:     /// Create a new ThreadPool.
30:     /// The size is the number of threads in the pool.
35:     pub fn new(size: usize) -> ThreadPool {
48:         ThreadPool { workers, sender: Some(sender) }
go.googlesource.com/debug:internal/core/thread.go: [ master, ]
8: type Thread struct {
7: // A Thread represents an operating system thread.
9: 	pid  uint64   // thread/process ID
15: func (t *Thread) Pid() uint64 {
19: // Regs returns the set of register values for the thread.
23: func (t *Thread) Regs() []uint64 {
27: func (t *Thread) PC() Address {
31: func (t *Thread) SP() Address {
github.com/kubernetes/minikube:site/package-lock.json: [ master, ]
1127:             "needle": "^2.2.1",
1108:         "needle": {
github.com/google/leveldb:benchmarks/db_bench.cc: [ master, ]
615:     ThreadState* thread;
358: struct ThreadState {
364:   ThreadState(int index, int seed) : tid(index), rand(seed), shared(nullptr) {}
612:   struct ThreadArg {
619:   static void ThreadBody(void* v) {
381:   int total_thread_count_;
72: static int FLAGS_threads = 1;
263:     // Just keep the messages from one thread
316:       // Rate is computed on actual elapsed time, not the sum of per-thread
343:   // Each thread goes through the following states:
357: // Per-thread state for concurrent executions of the same benchmark.
474:         total_thread_count_(0) {
569:         num_threads++;  // Add extra thread for writing
622:     ThreadState* thread = arg->thread;
634:     thread->stats.Start();
635:     (arg->bm->*(arg->method))(thread);
636:     thread->stats.Stop();
656:       ++total_thread_count_;
657:       // Seed the thread's random state deterministically based upon thread
660:       arg[i].thread = new ThreadState(i, /*seed=*/1000 + total_thread_count_);
661:       arg[i].thread->shared = &shared;
678:       arg[0].thread->stats.Merge(arg[i].thread->stats);
680:     arg[0].thread->stats.Report(name);
688:       delete arg[i].thread;
693:   void Crc32c(ThreadState* thread) {
702:       thread->stats.FinishedSingleOp();
708:     thread->stats.AddBytes(bytes);
709:     thread->stats.AddMessage(label);
712:   void SnappyCompress(ThreadState* thread) {
723:       thread->stats.FinishedSingleOp();
727:       thread->stats.AddMessage("(snappy failure)");
732:       thread->stats.AddMessage(buf);
733:       thread->stats.AddBytes(bytes);
737:   void SnappyUncompress(ThreadState* thread) {
748:       thread->stats.FinishedSingleOp();
753:       thread->stats.AddMessage("(snappy failure)");
755:       thread->stats.AddBytes(bytes);
781:   void OpenBench(ThreadState* thread) {
785:       thread->stats.FinishedSingleOp();
789:   void WriteSeq(ThreadState* thread) { DoWrite(thread, true); }
791:   void WriteRandom(ThreadState* thread) { DoWrite(thread, false); }
793:   void DoWrite(ThreadState* thread, bool seq) {
797:       thread->stats.AddMessage(msg);
808:         const int k = seq ? i + j : thread->rand.Uniform(FLAGS_num);
812:         thread->stats.FinishedSingleOp();
820:     thread->stats.AddBytes(bytes);
823:   void ReadSequential(ThreadState* thread) {
829:       thread->stats.FinishedSingleOp();
833:     thread->stats.AddBytes(bytes);
836:   void ReadReverse(ThreadState* thread) {
842:       thread->stats.FinishedSingleOp();
846:     thread->stats.AddBytes(bytes);
849:   void ReadRandom(ThreadState* thread) {
855:       const int k = thread->rand.Uniform(FLAGS_num);
860:       thread->stats.FinishedSingleOp();
864:     thread->stats.AddMessage(msg);
867:   void ReadMissing(ThreadState* thread) {
872:       const int k = thread->rand.Uniform(FLAGS_num);
876:       thread->stats.FinishedSingleOp();
880:   void ReadHot(ThreadState* thread) {
886:       const int k = thread->rand.Uniform(range);
889:       thread->stats.FinishedSingleOp();
893:   void SeekRandom(ThreadState* thread) {
899:       const int k = thread->rand.Uniform(FLAGS_num);
904:       thread->stats.FinishedSingleOp();
908:     thread->stats.AddMessage(msg);
911:   void SeekOrdered(ThreadState* thread) {
918:       k = (k + (thread->rand.Uniform(100))) % FLAGS_num;
922:       thread->stats.FinishedSingleOp();
927:     thread->stats.AddMessage(msg);
930:   void DoDelete(ThreadState* thread, bool seq) {
938:         const int k = seq ? i + j : (thread->rand.Uniform(FLAGS_num));
941:         thread->stats.FinishedSingleOp();
951:   void DeleteSeq(ThreadState* thread) { DoDelete(thread, true); }
953:   void DeleteRandom(ThreadState* thread) { DoDelete(thread, false); }
955:   void ReadWhileWriting(ThreadState* thread) {
956:     if (thread->tid > 0) {
957:       ReadRandom(thread);
959:       // Special thread that keeps writing until other threads are done.
964:           MutexLock l(&thread->shared->mu);
965:           if (thread->shared->num_done + 1 >= thread->shared->num_initialized) {
971:         const int k = thread->rand.Uniform(FLAGS_num);
982:       thread->stats.Start();
986:   void Compact(ThreadState* thread) { db_->CompactRange(nullptr, nullptr); }
71: // Number of concurrent threads to run.
359:   int tid;      // 0..n-1 when running in n threads
360:   Random rand;  // Has different seeds for different threads
516:       void (Benchmark::*method)(ThreadState*) = nullptr;
518:       int num_threads = FLAGS_threads;
606:         RunBenchmark(num_threads, name, method);
616:     void (Benchmark::*method)(ThreadState*);
620:     ThreadArg* arg = reinterpret_cast<ThreadArg*>(v);
648:                     void (Benchmark::*method)(ThreadState*)) {
651:     ThreadArg* arg = new ThreadArg[n];
662:       g_env->StartThread(ThreadBody, &arg[i]);
966:             // Other threads have finished
1052:     } else if (sscanf(argv[i], "--threads=%d%c", &n, &junk) == 1) {
1053:       FLAGS_threads = n;
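The benchmark-harness lines above follow one pattern: every worker thread owns a ThreadState (a tid plus a deterministically seeded random generator and its own stats), per-thread stats are merged into slot 0 once all workers finish, and ReadWhileWriting reserves tid 0 as the writer that keeps going until the readers are done. A minimal Java sketch of that harness shape, with illustrative Stats/ThreadState/Method names rather than the original C++ types:

import java.util.Random;
import java.util.concurrent.CountDownLatch;

// Minimal analogue of the RunBenchmark pattern above: N workers, per-thread state,
// stats merged into slot 0 once everyone is done. All names are illustrative.
class MiniBench {
    static class Stats {
        long ops;
        void finishedSingleOp() { ops++; }
        void merge(Stats other)  { ops += other.ops; }
    }

    static class ThreadState {
        final int tid;                 // 0..n-1, like the tid field above
        final Random rand;             // deterministically seeded per thread
        final Stats stats = new Stats();
        ThreadState(int tid, long seed) { this.tid = tid; this.rand = new Random(seed); }
    }

    interface Method { void run(ThreadState thread); }

    static Stats runBenchmark(int n, Method method) throws InterruptedException {
        ThreadState[] states = new ThreadState[n];
        CountDownLatch done = new CountDownLatch(n);
        for (int i = 0; i < n; i++) {
            states[i] = new ThreadState(i, 1000 + i);    // cf. /*seed=*/1000 + total_thread_count_
            ThreadState s = states[i];
            new Thread(() -> { method.run(s); done.countDown(); }).start();
        }
        done.await();                                    // wait for all workers
        for (int i = 1; i < n; i++) {
            states[0].stats.merge(states[i].stats);      // cf. arg[0].thread->stats.Merge(...)
        }
        return states[0].stats;
    }
}

In the ReadWhileWriting variant, the method handed to runBenchmark would branch on thread.tid: tid 0 keeps writing and polls a shared done-count, while every other tid runs the random-read loop.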
github.com/osxfuse/osxfuse:prefpane/externals/google-toolbox-for-mac/XcodePlugin/XcodeHeaders/DevToolsInterface.h: [ master, ]
4872: - (id)thread;
4528:     PBXLSThread *_selectedThread;
4540: - (id)selectedThread;
4689: - (id)threadViewModule;
4771: - (id)selectedThread;
4875: - (BOOL)threadIsRunning;
4876: - (BOOL)threadIsRunningButNotStepping;
4877: - (BOOL)threadStepDidTimeout;
4878: - (BOOL)threadIsSteppable;
4899: - (void)continueThread;
7806: + (void)_startCrashCatchingServiceThread;
11480: - (void)threadRequestUpdateStack:(id)fp8;
11481: - (void)threadRequestStepInstruction:(id)fp8;
11482: - (void)threadRequestNextInstruction:(id)fp8;
11483: - (void)threadRequestStepInto:(id)fp8;
11484: - (void)threadRequestStepOver:(id)fp8;
11485: - (void)threadRequestStepOut:(id)fp8;
11487: - (void)threadRequestContinue:(id)fp8;
11862:     PBXLSThread *_currentThread;
11876: - (id)currentThread;
11883: - (id)threadGroups;
11884: - (id)threadGroupWithName:(id)fp8;
11887: - (id)threads;
11888: - (id)threadWithHandle:(unsigned long)fp8;
11909: - (id)parentThread;
11923: - (void)threadDidUpdate;
11939: @interface PBXLSThread : PBXLSModel
11967: - (int)threadState;
11968: - (int)threadStateStatus;
11997: - (id)threads;
14973:     NSThread *_taskThread;
4527:     NSPopUpButtonCell *_threadsPopUp;
4539: - (void)selectThread:(id)fp8;
4544: - (void)setSelectedThread:(id)fp8;
4554:     PBXDebugThreadViewModule *_threadViewModule;
4770: - (void)setSelectedThread:(id)fp8;
4858:     PBXDebugStackTableHeaderCell *_threadsTablePopUp;
11885: - (id)_threadsOfType:(int)fp8;
11941:     int _threadState;
11943:     int _threadStateStatus;
558: - (BOOL)usesThreadedAnimation;
559: - (void)setUsesThreadedAnimation:(BOOL)fp8;
3033: - (void)_finishSpeculativeCompilationInSeparateThreadWithSpecCompInfo:(id)fp8;
4535: - (id)selectedThreadViewModule;
4537: - (BOOL)allThreadsStopped;
4541: - (void)_emptyThreadsPopUp;
4546: - (void)setThreadsPopUp:(id)fp8;
4552:     PBXDebugProcessAndThreadModule *_processAndThreadModule;
4851: @interface PBXDebugThreadViewModule : PBXDebugDSModelViewModule
4867: - (id)stackTableThreadsPopUp;
7563: @interface PBXDebugProcessAndThreadModule : PBXDebugViewModule
11457: - (void)destroyThreadWithThreadHandle:(unsigned long)fp8;
11461: - (void)willUpdateThreads;
11462: - (void)didUpdateThreads;
11505: - (oneway void)requestSuspendAllThreads;
11872: - (void)debuggerWillUpdateThreads:(id)fp8;
11881: - (void)debuggerDidUpdateThreads:(id)fp8;
11886: - (id)nonRunningThreads;
11958: - (id)parentThreadGroup;
11983: @interface PBXLSThreadGroup : PBXLSModel
13118: - (BOOL)usesThreadedAnimation;
13119: - (void)setUsesThreadedAnimation:(BOOL)fp8;
13379: + (BOOL)useThreadedAnimation;
20671: - (BOOL)usesThreadedAnimation;
20672: - (void)setUsesThreadedAnimation:(BOOL)fp8;
11458: - (id)threadGroupWithName:(id)fp8 create:(BOOL)fp12;
11459: - (oneway void)setState:(int)fp8 status:(int)fp12 isCurrentThread:(BOOL)fp16 forServerThreadHandle:(unsigned long)fp20;
11486: - (void)threadRequest:(byref id)fp8 stepUntilLineNumber:(unsigned int)fp12;
11494: - (void)requestDebuggerMovePCInThread:(unsigned long)fp8 fileSpec:(id)fp12;
11640: - (void)requestPrintDescriptionInThread:(id)fp8 atStackFrame:(id)fp12;
11875: - (void)debugger:(id)fp8 setCurrentThread:(id)fp12;
11995: - (void)debugger:(id)fp8 addThread:(id)fp12;
11996: - (void)debugger:(id)fp8 removeThread:(id)fp12;
11455: - (void)createThreadWithThreadHandle:(unsigned long)fp8 name:(id)fp12 inThreadGroupNamed:(id)fp16;
11873: - (void)debugger:(id)fp8 addThreadGroup:(id)fp12;
11874: - (void)debugger:(id)fp8 removeThreadGroup:(id)fp12;
11970: - (void)debugger:(id)fp8 setThreadState:(int)fp12 status:(int)fp16;
14988: - (BOOL)detachNewThreadSelector:(SEL)fp8 cancelSelector:(SEL)fp12 toTarget:(id)fp16 withObject:(id)fp20;
github.com/google/j2objc:jre_emul/android/platform/libcore/ojluni/src/main/java/java/util/concurrent/CompletableFuture.java: [ master, ]
1791:         volatile Thread thread;
430:     static final class ThreadPerTaskExecutor implements Executor {
2748:             public Thread newThread(Runnable r) {
2747:         static final class DaemonThreadFactory implements ThreadFactory {
69:  * <em>non-async</em> methods may be performed by the thread that
76:  * which case, a new Thread is created to run each task).
83:  * class maintains at most one daemon thread for triggering and
134:      * be set directly if known to be thread-confined, else via CAS.
184:      *   if already claimed by another thread.
187:      *   code for when known to be thread-confined ("now" methods) and
431:         public void execute(Runnable r) { new Thread(r).start(); }
554:          * thread claims ownership.  If async, starts as task -- a
1780:      * Completion for recording and releasing a waiting thread.  This
1794:             this.thread = Thread.currentThread();
1800:             Thread w; // no need to atomically claim
1801:             if ((w = thread) != null) {
1802:                 thread = null;
1808:             if (Thread.interrupted())
1814:                     thread == null);
1825:         final boolean isLive() { return thread != null; }
1857:             q.thread = null;
1862:                     Thread.currentThread().interrupt();
1875:         if (Thread.interrupted())
1901:                 q.thread = null;
2015:      * @throws InterruptedException if the current thread was interrupted
2035:      * @throws InterruptedException if the current thread was interrupted
2526:      * parallel thread, or else an Executor using one thread per async
2528:      * an Executor that provides at least one independent thread.
2749:                 Thread t = new Thread(r);
58:  * <p>When two or more threads attempt to
213:      * linked). Multiple threads can call postComplete, which
221:      * that wake up waiting threads.  The mechanics are similar to
231:      * threads nulling out fields.  We also try to unlink non-isLive
239:      * because they are only visible to other threads upon safe
427:         ForkJoinPool.commonPool() : new ThreadPerTaskExecutor();
1839:                 if (ThreadLocalRandom.nextSecondarySeed() >= 0)
2756:         static final ScheduledThreadPoolExecutor delayer;
2758:             (delayer = new ScheduledThreadPoolExecutor(
2759:                 1, new DaemonThreadFactory())).
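Lines 430-431 and 2747-2759 above show two small helpers around async execution: an Executor that starts one fresh Thread per task, and a one-thread ScheduledThreadPoolExecutor built from a daemon ThreadFactory so delayed triggers never keep the JVM alive. A hedged restatement of both idioms as standalone classes (names here are illustrative, not the java.util.concurrent internals):

import java.util.concurrent.Executor;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;

final class ExecutorIdioms {
    // One new Thread per submitted task (cf. ThreadPerTaskExecutor above).
    static final class ThreadPerTask implements Executor {
        @Override public void execute(Runnable r) { new Thread(r).start(); }
    }

    // A factory whose threads are daemons, so the scheduler never keeps the JVM alive
    // (cf. the DaemonThreadFactory backing the delayer).
    static final class DaemonFactory implements ThreadFactory {
        @Override public Thread newThread(Runnable r) {
            Thread t = new Thread(r);
            t.setDaemon(true);
            return t;
        }
    }

    // A single daemon thread for timed triggers.
    static final ScheduledThreadPoolExecutor DELAYER =
        new ScheduledThreadPoolExecutor(1, new DaemonFactory());

    public static void main(String[] args) throws InterruptedException {
        DELAYER.schedule(() -> System.out.println("delayed trigger"), 100, TimeUnit.MILLISECONDS);
        new ThreadPerTask().execute(() -> System.out.println("runs on its own thread"));
        Thread.sleep(200);   // only so the demo sees the daemon-thread output before exiting
    }
}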
github.com/google/guava:guava/src/com/google/common/util/concurrent/AbstractFuture.java: [ master, ]
203:     @CheckForNull volatile Thread thread;
1302:     abstract void putThread(Waiter waiter, Thread newValue);
1382:     void putThread(Waiter waiter, Thread newValue) {
1446:     void putThread(Waiter waiter, Thread newValue) {
1492:     void putThread(Waiter waiter, Thread newValue) {
1337:     static final long WAITER_THREAD_OFFSET;
1426:     final AtomicReferenceFieldUpdater<Waiter, Thread> waiterThreadUpdater;
169:                 newUpdater(Waiter.class, Thread.class, "thread"),
214:       ATOMIC_HELPER.putThread(this, Thread.currentThread());
225:       // unpark even though the thread has already removed itself from the list. But even if we did
227:       Thread w = thread;
229:         thread = null;
241:    *   <li>This is only called when a waiting thread times out or is interrupted. Both of which
247:     node.thread = null; // mark as 'deleted'
258:         if (curr.thread != null) { // we aren't unlinking this node, update pred.
262:           if (pred.thread == null) { // We raced with another node that unlinked pred. Restart.
409:   //   have observed 12 micros on 64-bit linux systems to wake up a parked thread). So if the
426:    * current thread is interrupted during the call, even if the value is already available.
439:     if (Thread.interrupted()) {
459:               if (Thread.interrupted()) {
474:                 // Remove the waiter, one way or another we are done parking this thread.
495:       if (Thread.interrupted()) {
539:    * current thread is interrupted during the call, even if the value is already available.
547:     if (Thread.interrupted()) {
564:             if (Thread.interrupted()) {
993:         Thread.currentThread().interrupt();
1267:     // arbitrary cycles using a thread local but this should be a good enough solution (it is also
1301:     /** Non-volatile write of the thread to the {@link Waiter#thread} field. */
1371:         WAITER_THREAD_OFFSET = unsafe.objectFieldOffset(Waiter.class.getDeclaredField("thread"));
1383:       UNSAFE.putObject(waiter, WAITER_THREAD_OFFSET, newValue);
1433:         AtomicReferenceFieldUpdater<Waiter, Thread> waiterThreadUpdater,
1493:       waiter.thread = newValue;
392:   /** All waiting threads. */
998:   /** Unblocks all threads and runs all listeners. */
1126:   /** Releases all threads in the {@link #waiters} list, and clears the list. */
1438:       this.waiterThreadUpdater = waiterThreadUpdater;
1447:       waiterThreadUpdater.lazySet(waiter, newValue);
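The Waiter lines above pair a stack of waiter nodes, each holding a volatile Thread, with pluggable write strategies (Unsafe.putObject, AtomicReferenceFieldUpdater.lazySet, or a plain volatile write as a fallback); unparking clears the thread field so cleanup can treat null as "deleted". A small sketch of the updater-based strategy on a simplified node, assuming this stripped-down Waiter rather than Guava's real class:

import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
import java.util.concurrent.locks.LockSupport;

// Simplified stand-in for a waiter node: a thread slot written lazily before the node
// is published, and cleared on unpark so unlinking can treat null as "deleted".
final class Waiter {
    volatile Thread thread;
    volatile Waiter next;

    private static final AtomicReferenceFieldUpdater<Waiter, Thread> THREAD_UPDATER =
        AtomicReferenceFieldUpdater.newUpdater(Waiter.class, Thread.class, "thread");

    void putThread(Thread newValue) {
        // Lazy write: the node has not yet been made visible to other threads.
        THREAD_UPDATER.lazySet(this, newValue);
    }

    void unpark() {
        Thread w = thread;      // no need to claim atomically
        if (w != null) {
            thread = null;      // mark as 'deleted'
            LockSupport.unpark(w);
        }
    }
}

A blocking caller would do waiter.putThread(Thread.currentThread()), push the node onto the waiter stack, and then LockSupport.park() until unpark() runs or the wait times out.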
github.com/google/guava:android/guava/src/com/google/common/util/concurrent/AbstractFuture.java: [ master, ]
203:     @CheckForNull volatile Thread thread;
1302:     abstract void putThread(Waiter waiter, Thread newValue);
1382:     void putThread(Waiter waiter, Thread newValue) {
1462:     void putThread(Waiter waiter, Thread newValue) {
1508:     void putThread(Waiter waiter, Thread newValue) {
1337:     static final long WAITER_THREAD_OFFSET;
1442:     final AtomicReferenceFieldUpdater<Waiter, Thread> waiterThreadUpdater;
169:                 newUpdater(Waiter.class, Thread.class, "thread"),
214:       ATOMIC_HELPER.putThread(this, Thread.currentThread());
225:       // unpark even though the thread has already removed itself from the list. But even if we did
227:       Thread w = thread;
229:         thread = null;
241:    *   <li>This is only called when a waiting thread times out or is interrupted. Both of which
247:     node.thread = null; // mark as 'deleted'
258:         if (curr.thread != null) { // we aren't unlinking this node, update pred.
262:           if (pred.thread == null) { // We raced with another node that unlinked pred. Restart.
409:   //   have observed 12 micros on 64-bit linux systems to wake up a parked thread). So if the
426:    * current thread is interrupted during the call, even if the value is already available.
439:     if (Thread.interrupted()) {
459:               if (Thread.interrupted()) {
474:                 // Remove the waiter, one way or another we are done parking this thread.
495:       if (Thread.interrupted()) {
539:    * current thread is interrupted during the call, even if the value is already available.
547:     if (Thread.interrupted()) {
564:             if (Thread.interrupted()) {
993:         Thread.currentThread().interrupt();
1267:     // arbitrary cycles using a thread local but this should be a good enough solution (it is also
1301:     /** Non-volatile write of the thread to the {@link Waiter#thread} field. */
1371:         WAITER_THREAD_OFFSET = unsafe.objectFieldOffset(Waiter.class.getDeclaredField("thread"));
1383:       UNSAFE.putObject(waiter, WAITER_THREAD_OFFSET, newValue);
1449:         AtomicReferenceFieldUpdater<Waiter, Thread> waiterThreadUpdater,
1509:       waiter.thread = newValue;
392:   /** All waiting threads. */
998:   /** Unblocks all threads and runs all listeners. */
1126:   /** Releases all threads in the {@link #waiters} list, and clears the list. */
1454:       this.waiterThreadUpdater = waiterThreadUpdater;
1463:       waiterThreadUpdater.lazySet(waiter, newValue);
github.com/apache/beam:sdks/go/pkg/beam/model/fnexecution_v1/beam_fn_api.pb.go: [ master, ]
2544: 	Thread string `protobuf:"bytes,8,opt,name=thread,proto3" json:"thread,omitempty"`
2628: func (x *LogEntry) GetThread() string {
2543: 	// (Optional) The name of the thread this log statement is associated with.
2630: 		return x.Thread
github.com/google/go-github:github/event_types.go: [ master, ]
917: 	Thread      *PullRequestThread `json:"thread,omitempty"`
913: type PullRequestReviewThreadEvent struct {
910: // The Webhook event name is "pull_request_review_thread".
912: ...(26 bytes skipped)...//docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads#pull_request_review_thread
908: // PullRequestReviewThreadEvent is triggered when a comment made as part of a
github.com/google/binnavi:debug/client/windynamorio/drdebug.proto: [ master, ]
347:   optional uint64 thread  = 5; // required
54:     SUSPEND_THREAD        = 9;
55:     RESUME_THREAD         = 10;
75:   repeated uint64 thread_id = 2;
185:     UNKNOWN_THREAD             = 12;
232:   repeated uint64 thread_id = 1;
306:     THREAD_CREATED   = 2;
307:     THREAD_EXITED    = 3;
318:   optional ThreadCreatedInfo   thread_created_info   = 3;
319:   optional ThreadExitedInfo    thread_exited_info    = 4;
330:   optional uint64 thread_id = 3; // required
334:   optional uint64 thread_id = 1; // required
338:   optional uint64 thread_id = 1; // required
357:   optional uint64 thread_id    = 3; // required
365:   optional uint64 thread_id = 1; // required
333: message ThreadCreatedInfo {
337: message ThreadExitedInfo {
84:   optional SuspendThreadArgs       suspend_thread_args        = 9;
85:   optional ResumeThreadArgs        resume_thread_args         = 10;
206:   optional SuspendThreadResult       suspend_thread_result        = 9;
207:   optional ResumeThreadResult        resume_thread_result         = 10;
53:     LIST_THREADS          = 8;
83:   optional ListThreadsArgs         list_threads_args          = 8;
205:   optional ListThreadsResult         list_threads_result          = 8;
118: message ListThreadsArgs {
121: message SuspendThreadArgs {
124: message ResumeThreadArgs {
231: message ListThreadsResult {
235: message SuspendThreadResult {
238: message ResumeThreadResult {
52:     // thread control
74:   //       not all commands could be executed thread-locally (like write memory)
186:     // Someone tried to resume an already running thread.
72:   // execute the command only on those threads
github.com/GNOME/gimp:tools/performance-log-viewer.py: [ mainline, ]
285: Thread = namedtuple ("Thread", ("id", "name", "state", "frames"))
744:                 def thread (id, state = None):
734:                 def match_thread (thread, id, state = None):
2930:         def thread_filter_source_get (self):
2933:         def thread_filter_source_set (self, thread_filter):
2940:         def thread_filter_button_toggled (self, button):
275: class ThreadState (enum.Enum):
1955:     class ThreadStore (Gtk.ListStore):
2237:     def threads_row_activated (self, tree, path, col):
2257:     def threads_selection_changed (self, tree_sel):
2433:     class ThreadFilter (Gtk.TreeView):
2557:     class ThreadPopover (Gtk.Popover):
312:             for thread in element.find ("backtrace").iterfind ("thread"):
313:                 id      = thread.get ("id")
314:                 name    = thread.get ("name")
315:                 running = thread.get ("running")
317:                 t = Thread (
325:                 for frame in thread.iterfind ("frame"):
722:             f = eval ("lambda thread, function, %s: %s" % (
737:                              id == thread.id)                 or  \
739:                              thread.name                      and \
740:                              re.fullmatch (id, thread.name))) and \
742:                             re.fullmatch (state, str (thread.state)))
745:                     return any (match_thread (thread, id, state)
746:                                 for thread in samples[i].backtrace or [])
749:                     for thread in samples[i].backtrace or []:
750:                         if match_thread (thread, id, state):
751:                             for frame in thread.frames:
757:                 if f (thread, function, **{
2024:         self.thread_store = store
2028:         self.thread_tree = tree
2205:         sel_rows = self.thread_tree.get_selection ().get_selected_rows ()[1]
2208:             tid = self.thread_store[sel_rows[0]][self.ThreadStore.ID]
2212:         self.thread_store.clear ()
2215:             thread = samples[i].backtrace[t]
2217:             iter = self.thread_store.append (
2218:                 (t, thread.id, thread.name, str (thread.state))
2221:             if thread.id == tid:
2222:                 self.thread_tree.get_selection ().select_iter (iter)
2238:         iter = self.thread_store.get_iter (path)
2240:         tid = self.thread_store[iter][self.ThreadStore.ID]
2245:             threads = filter (lambda thread:
2246:                                 thread.id    == tid and
2247:                                 thread.state == ThreadState.RUNNING,
2298:             for thread in sample.backtrace or []:
2299:                 for frame in thread.frames:
2446:                 threads = list ({thread.id
2448:                                  for thread in sample.backtrace or ()})
2532:             threads = {thread.id: thread.name
2534:                        for thread in samples[i].backtrace or ()}
2574:             thread_filter = ProfileViewer.ThreadFilter ()
2575:             self.thread_filter = thread_filter
2576:             scrolled.add (thread_filter)
2577:             thread_filter.show ()
2640:                 thread_filter_store = popover.thread_filter.store
2642:                 self.thread_filter_store = thread_filter_store
2643:                 self.thread_filter       = thread_filter_store.get_filter ()
2645:                 history.add_source (self.thread_filter_source_get,
2646:                                     self.thread_filter_source_set)
2652:                 button.connect ("toggled", self.thread_filter_button_toggled)
2762:                 for thread in samples[i].backtrace or []:
2763:                     if thread.state in self.thread_filter[thread.id]:
2764:                         thread_frames = thread.frames
2767:                             thread_frames = reversed (thread_frames)
2772:                         for frame in thread_frames:
2931:             return self.thread_filter_store.get_filter ()
2934:             self.thread_filter = thread_filter
2936:             self.thread_filter_store.set_filter (thread_filter)
2942:                 thread_filter = self.thread_filter_store.get_filter ()
2944:                 if thread_filter != self.thread_filter:
2945:                     self.thread_filter = thread_filter
281:             ThreadState.SUSPENDED: "S",
282:             ThreadState.RUNNING:   "R"
320:                     state  = ThreadState.RUNNING if running and int (running) \
321:                              else ThreadState.SUSPENDED,
2012:         header = Gtk.HeaderBar (title = "Threads", has_subtitle = False)
2023:         store = self.ThreadStore ()
2033:         tree.connect ("row-activated", self.threads_row_activated)
2036:                                        self.threads_selection_changed)
2044:         col.add_attribute (cell, "text", self.ThreadStore.ID)
2052:         col.add_attribute (cell, "text", self.ThreadStore.NAME)
2060:         col.add_attribute (cell, "text", self.ThreadStore.STATE)
2250:             if list (threads):
2438:             STATE   = {list (ThreadState)[i]: 3 + i
2439:                        for i in range (len (ThreadState))}
2449:                 threads.sort ()
2451:                 states = [state == ThreadState.RUNNING for state in self.STATE]
2453:                 for id in threads:
2539:                 if id in threads:
2541:                     row[self.store.NAME]    = threads[id]
2638:                 popover = ProfileViewer.ThreadPopover ()
2659:                 label = Gtk.Label (label = "Threads")
github.com/google/binnavi:debug/client/defs.hpp: [ master, ]
232: struct Thread {
247:   Thread(unsigned int tid, ThreadState state)
136: enum ThreadState {
264:   typedef std::function<bool(const Thread&)> ThreadComparator;
586:   std::vector<Thread> threads;
589:   void addThread(const Thread& thread) {
158:   dbgevt_thread_created,
161:   dbgevt_thread_closed,
266:   static ThreadComparator MakeThreadIdComparator(int tid) {
593:   std::vector<Thread> getThreads() const {
135: // Identifies the state of a thread
137:   // Thread is running
140:   // Thread is suspended
157:   // A new thread was created in the target process
160:   // An existing thread was closed in the target process
230:  * Describes a thread of the target process
233:   //  The thread ID of the thread
236:   // The state of the thread
242:    * Creates a new Thread object
244:    * @param tid The thread ID of the thread
245:    * @param state The state of the thread
252:   bool operator==(const Thread& rhs) const {
256:   bool operator<(const Thread& rhs) const {
261:    * Returns a function which matches the thread with a given id
267:     return [tid](const Thread& t)->bool {return t.tid == tid; };
414:   // Thread ID of the thread where the exception happened
590:     threads.push_back(thread);
717:       std::vector<Thread> threads = registers.getThreads();
719:       for (std::vector<Thread>::iterator Iter = threads.begin();
721:         Thread t = *Iter;
237:   ThreadState state;
594:     return threads;
720:           Iter != threads.end(); ++Iter) {
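defs.hpp models a debuggee thread as just a tid and a ThreadState, and MakeThreadIdComparator returns a lambda that matches a thread by id. A rough Java restatement of that shape; the names mirror the C++ but are not binnavi's API:

import java.util.ArrayList;
import java.util.List;
import java.util.function.Predicate;

final class Threads {
    enum ThreadState { RUNNING, SUSPENDED }            // cf. the ThreadState enum above

    static final class ThreadInfo {                    // cf. struct Thread { tid; state; }
        final int tid;
        final ThreadState state;
        ThreadInfo(int tid, ThreadState state) { this.tid = tid; this.state = state; }
    }

    // cf. MakeThreadIdComparator: a predicate matching the thread with a given id.
    static Predicate<ThreadInfo> threadIdComparator(int tid) {
        return t -> t.tid == tid;
    }

    private final List<ThreadInfo> threads = new ArrayList<>();
    void addThread(ThreadInfo t)      { threads.add(t); }          // cf. addThread
    List<ThreadInfo> getThreads()     { return new ArrayList<>(threads); }
}

The copying getter mirrors the std::vector<Thread> getThreads() above, and the predicate can drive the same find-by-tid iteration shown at source lines 717-721.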
github.com/apache/beam:website/www/site/themes/docsy/userguide/package-lock.json: [ master, ]
1098:             "needle": "^2.2.1",
1079:         "needle": {
github.com/kubernetes/website:themes/docsy/userguide/package-lock.json: [ master, ]
1098:             "needle": "^2.2.1",
1079:         "needle": {
github.com/GNOME/gimp:app/widgets/gimpdashboard.c: [ mainline, ]
299:   GThread                      *thread;
146:   VARIABLE_ASSIGNED_THREADS,
147:   VARIABLE_ACTIVE_THREADS,
1254:   /* sampler thread
1256:    * we use a separate thread for sampling, so that data is sampled even when
1257:    * the main thread is busy
1259:   priv->thread = g_thread_new ("dashboard",
1374:   if (priv->thread)
1383:       g_clear_pointer (&priv->thread, g_thread_join);
3660:       gint     thread;
3681:           for (thread = 0; thread < n_threads; thread++)
3683:               guintptr thread_id;
3685:               thread_id = gimp_backtrace_get_thread_id (priv->log_backtrace,
3686:                                                         thread);
3688:               if (gimp_backtrace_find_thread_by_id (backtrace,
3689:                                                     thread_id, thread) < 0)
3691:                   const gchar *thread_name;
3695:                   thread_name =
3696:                     gimp_backtrace_get_thread_name (priv->log_backtrace,
3697:                                                     thread);
3700:                                              "<thread id=\"%llu\"",
3701:                                              (unsigned long long) thread_id);
3703:                   if (thread_name)
3707:                       gimp_dashboard_log_print_escaped (dashboard, thread_name);
3720:       for (thread = 0; thread < n_threads; thread++)
3722:           guintptr     thread_id;
3723:           const gchar *thread_name;
3732:           thread_id   = gimp_backtrace_get_thread_id     (backtrace, thread);
3733:           thread_name = gimp_backtrace_get_thread_name   (backtrace, thread);
3735:           running     = gimp_backtrace_is_thread_running (backtrace, thread);
3736:           n_frames    = gimp_backtrace_get_n_frames      (backtrace, thread);
3740:               gint other_thread = gimp_backtrace_find_thread_by_id (
3741:                 priv->log_backtrace, thread_id, thread);
3743:               if (other_thread >= 0)
3748:                   last_running  = gimp_backtrace_is_thread_running (
3749:                     priv->log_backtrace, other_thread);
3751:                     priv->log_backtrace, other_thread);
3758:                                                             thread, i) !=
3760:                                                             other_thread, i))
3772:                                                             thread, -i - 1) !=
3774:                                                             other_thread, -i - 1))
3794:                                      "<thread id=\"%llu\"",
3795:                                      (unsigned long long) thread_id);
3797:           if (thread_name)
3801:               gimp_dashboard_log_print_escaped (dashboard, thread_name);
3834:                                                               thread, frame);
3844:                                          "</thread>\n");
694:   [VARIABLE_ASSIGNED_THREADS] =
695:   { .name             = "assigned-threads",
697:     .description      = N_("Number of assigned worker threads"),
700:     .data             = "assigned-threads"
703:   [VARIABLE_ACTIVE_THREADS] =
704:   { .name             = "active-threads",
706:     .description      = N_("Number of active worker threads"),
709:     .data             = "active-threads"
949:                           { .variable       = VARIABLE_ASSIGNED_THREADS,
952:                           { .variable       = VARIABLE_ACTIVE_THREADS,
3659:       gint     n_threads;
3679:           n_threads = gimp_backtrace_get_n_threads (priv->log_backtrace);
3718:       n_threads = gimp_backtrace_get_n_threads (backtrace);
1260:                                (GThreadFunc) gimp_dashboard_sample,
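gimpdashboard.c starts a dedicated "dashboard" sampler thread with g_thread_new() so samples keep arriving while the main thread is busy, and later joins it via g_clear_pointer (&priv->thread, g_thread_join). A hedged Java analogue of that start / sample-loop / join lifecycle (the sampling body is a placeholder, not GIMP's sampler):

import java.util.concurrent.atomic.AtomicBoolean;

// Background sampler: started once, samples at a fixed interval, joined on shutdown.
final class Sampler {
    private final AtomicBoolean quit = new AtomicBoolean(false);
    private Thread thread;

    synchronized void start() {
        if (thread != null) return;
        thread = new Thread(this::sampleLoop, "dashboard");   // cf. g_thread_new ("dashboard", ...)
        thread.start();
    }

    private void sampleLoop() {
        while (!quit.get()) {
            // ... take one sample here, even while the "main thread" is busy ...
            try { Thread.sleep(250); } catch (InterruptedException e) { return; }
        }
    }

    synchronized void shutdown() throws InterruptedException {
        if (thread == null) return;
        quit.set(true);
        thread.interrupt();
        thread.join();             // cf. g_thread_join
        thread = null;             // cf. g_clear_pointer (&priv->thread, ...)
    }
}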
github.com/google/gapid:gapis/service/service.proto: [ master, ]
101:     Thread thread = 18;
892: message Thread {
78: message Threads {
102:     Threads threads = 19;
79:   repeated path.Thread list = 1;
891: // Thread represents a single thread in the capture.
github.com/apache/pdfbox:pdfbox/src/main/java/org/apache/pdfbox/cos/COSName.java: [ trunk, ]
573:     public static final COSName THREAD = new COSName("Thread");
574:     public static final COSName THREADS = new COSName("Threads");
35:     // using ConcurrentHashMap because this can be accessed by multiple threads
github.com/libgit2/objective-git:External/libgit2/src/pack-objects.c: [ master, ]
1110: 	git_thread thread;
1109: struct thread_params {
1127: static void *threaded_find_deltas(void *arg)
183: unsigned int git_packbuilder_set_threads(git_packbuilder *pb, unsigned int n)
15: #include "thread-utils.h"
155: 	pb->nr_threads = 1; /* do not spawn any thread by default */
1129: 	struct thread_params *me = arg;
1143: 			git_error_set(GIT_ERROR_THREAD, "unable to lock packfile condition mutex");
1152: 		 * condition because the main thread may have set it to 1
1155: 		 * was initialized to 0 before this thread was spawned
1168: 	struct thread_params *p;
1219: 		ret = git_thread_create(&p[i].thread,
1222: 			git_error_set(GIT_ERROR_THREAD, "unable to create thread");
1229: 	 * Now let's wait for work completion.  Each time a thread is done
1231: 	 * thread with the largest number of unprocessed objects and give
1232: 	 * it to that newly idle thread.  This ensures good load balancing
1237: 		struct thread_params *target = NULL;
1238: 		struct thread_params *victim = NULL;
1241: 		/* Start by locating a thread that has transitioned its
1256: 		 * a thread to receive more work. We still need to locate a
1257: 		 * thread from which to steal work (the victim). */
1291: 			git_error_set(GIT_ERROR_THREAD, "unable to lock packfile condition mutex");
1301: 			git_thread_join(&target->thread, NULL);
50: #ifdef GIT_THREADS
62: #endif /* GIT_THREADS */
163: #ifdef GIT_THREADS
187: #ifdef GIT_THREADS
188: 	pb->nr_threads = n;
191: 	assert(1 == pb->nr_threads);
194: 	return pb->nr_threads;
1033: 		 * it anyway, and doing it here while we're threaded will
1034: 		 * save a lot of time in the non threaded write phase,
1107: #ifdef GIT_THREADS
1170: 	int ret, active_threads = 0;
1172: 	if (!pb->nr_threads)
1173: 		pb->nr_threads = git_online_cpus();
1175: 	if (pb->nr_threads <= 1) {
1180: 	p = git__mallocarray(pb->nr_threads, sizeof(*p));
1183: 	/* Partition the work among the threads */
1184: 	for (i = 0; i < pb->nr_threads; ++i) {
1185: 		size_t sub_size = list_size / (pb->nr_threads - i);
1188: 		if (sub_size < 2*window && i+1 < pb->nr_threads)
1211: 	/* Start work threads */
1212: 	for (i = 0; i < pb->nr_threads; ++i) {
1220: 					threaded_find_deltas, &p[i]);
1225: 		active_threads++;
1236: 	while (active_threads) {
1247: 			for (i = 0; !target && i < pb->nr_threads; i++)
1258: 		for (i = 0; i < pb->nr_threads; i++)
1304: 			active_threads--;
1773: #ifdef GIT_THREADS
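pack-objects.c splits the delta search so that thread i takes list_size / (nr_threads - i) of whatever remains, then rebalances by handing an idle worker part of the busiest worker's queue. The sketch below covers only the proportional split; the 2*window minimum-slice rule from source line 1188 and the work-stealing loop are omitted, so treat it as an illustration of the division, not of libgit2's scheduler:

// Proportional split of `listSize` items across `nThreads` workers:
// worker i takes (remaining / threadsLeft), so the remainder spreads out evenly.
final class Partition {
    static int[] split(int listSize, int nThreads) {
        int[] subSize = new int[nThreads];
        int remaining = listSize;
        for (int i = 0; i < nThreads; i++) {
            subSize[i] = remaining / (nThreads - i);   // cf. list_size / (pb->nr_threads - i)
            remaining -= subSize[i];
        }
        return subSize;                                // sums to listSize
    }

    public static void main(String[] args) {
        // e.g. 10 objects over 4 threads -> [2, 2, 3, 3]
        System.out.println(java.util.Arrays.toString(split(10, 4)));
    }
}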
github.com/tensorflow/tfjs:.vscode/settings.json: [ master, ]
109:     "thread": "cpp",
129:     "__threading_support": "cpp",
github.com/apache/httpcomponents-client:httpclient5/src/main/java/org/apache/hc/client5/http/impl/async/H2AsyncClientBuilder.java: [ master, ]
893:         private final Thread thread;
211:     private ThreadFactory threadFactory;
468:     public final H2AsyncClientBuilder setThreadFactory(final ThreadFactory threadFactory) {
609:      * connection pool using a background thread.
612:      * in order to stop and release the background thread.
896:             this.thread = new DefaultThreadFactory("idle-connection-evictor", true).newThread(() -> {
898:                     while (!Thread.currentThread().isInterrupted()) {
903:                     Thread.currentThread().interrupt();
911:             thread.start();
915:             thread.interrupt();
38: import java.util.concurrent.ThreadFactory;
466:      * Assigns {@link ThreadFactory} instance.
469:         this.threadFactory = threadFactory;
779:                 threadFactory != null ? threadFactory : new DefaultThreadFactory("httpclient-dispatch", true),
876:                 threadFactory != null ? threadFactory : new DefaultThreadFactory("httpclient-main", true),
76: import org.apache.hc.core5.concurrent.DefaultThreadFactory;
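The builder lines above show the evictor idiom: a daemon thread named "idle-connection-evictor" loops until interrupted, doing periodic pool maintenance, with start() to launch it and interrupt() to stop it. A minimal sketch of the same lifecycle around a plain daemon thread; the eviction callback is a placeholder, not the HttpClient connection-pool API:

import java.util.concurrent.TimeUnit;

// Background evictor: a daemon thread that runs periodic maintenance until interrupted.
final class IdleEvictor {
    private final Thread thread;

    IdleEvictor(Runnable evictIdleConnections, long period, TimeUnit unit) {
        this.thread = new Thread(() -> {
            try {
                while (!Thread.currentThread().isInterrupted()) {
                    unit.sleep(period);
                    evictIdleConnections.run();          // placeholder for pool maintenance
                }
            } catch (InterruptedException ex) {
                Thread.currentThread().interrupt();      // restore the flag and exit
            }
        }, "idle-connection-evictor");
        this.thread.setDaemon(true);
    }

    void start()    { thread.start(); }
    void shutdown() { thread.interrupt(); }
}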
github.com/googleapis/google-cloud-ruby:google-cloud-debugger/lib/google/cloud/debugger/breakpoint/evaluator.rb: [ master, ]
169:             Thread => hashify(
549:             Thread => hashify(
548:             ThreadGroup => hashify(%I[enclosed? list]).freeze,
193:               %I[disable_method_trace_for_thread]
567:                 thread_variable?
568:                 thread_variable_get
569:                 thread_variables
764:             Thread.current.thread_variable_get EVALUATOR_REFERENCE
835:           # context binding. The evaluation is done in a separate thread due
837:           # additional code tracing is disabled in the original thread.
849:             # do evaluation in a new thread, where function calls can be
851:             thr = Thread.new do
852:               Thread.current.thread_variable_set EVALUATOR_REFERENCE, self
868:             # Force terminate evaluation thread if not finished already and
946:                   :enable_method_trace_for_thread)
950:                   :disable_method_trace_for_thread)
1018:               :disable_method_trace_for_thread
chromium.googlesource.com/chromium/deps/psyco_win32:psyco/profiler.py: [ master, ]
20:     import dummy_thread as thread
365: def psyco_start_new_thread(callable, args, kw=None):
371: original_start_new_thread = thread.start_new_thread
358: def psyco_thread_stub(callable, args, kw):
18:     import thread
32: # a lock for a thread-safe go()
33: go_lock = thread.allocate_lock()
260:         self.lock = thread.allocate_lock()
343: # and thread.start_new_thread().
366:     "This is the Psyco-aware version of thread.start_new_thread()."
367:     return original_start_new_thread(psyco_thread_stub, (callable, args, kw))
374: thread.start_new_thread = psyco_start_new_thread
375: # hack to patch threading._start_new_thread if the module is
378:     hasattr(sys.modules['threading'], '_start_new_thread')):
379:     sys.modules['threading']._start_new_thread = psyco_start_new_thread
204:             alarm.stop(1)   # wait for parallel threads to stop
377: if ('threading' in sys.modules and
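profiler.py swaps thread.start_new_thread for a wrapper so that every new thread runs psyco_thread_stub (which installs the profiler) before the original callable. Java has no global hook on thread creation, but the usual equivalent is a ThreadFactory that wraps each Runnable; a hedged analogue, with the per-thread setup left as a caller-supplied placeholder:

import java.util.concurrent.ThreadFactory;

// Every thread produced by this factory runs per-thread setup before its real task,
// mirroring the "stub runs first, then the original callable" trick above.
final class InstrumentedThreadFactory implements ThreadFactory {
    private final Runnable perThreadSetup;   // e.g. enable a profiler in the new thread

    InstrumentedThreadFactory(Runnable perThreadSetup) {
        this.perThreadSetup = perThreadSetup;
    }

    @Override
    public Thread newThread(Runnable task) {
        return new Thread(() -> {
            perThreadSetup.run();   // cf. psyco_thread_stub enabling Psyco first
            task.run();             // then the original callable
        });
    }
}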
android.googlesource.com/platform/external/lzma:CPP/7zip/UI/Common/Bench.cpp: [ master, ]
658:   NWindows::CThread thread[2];
1601:   NWindows::CThread Thread;
1652:   NWindows::CThread Thread;
772:   HRESULT CreateEncoderThread()
782:   HRESULT CreateDecoderThread(unsigned index, bool callbackMode
660:   UInt32 NumDecoderSubThreads;
736:   static THREAD_FUNC_DECL EncodeThreadFunction(void *param)
759:   static THREAD_FUNC_DECL DecodeThreadFunction(void *param)
1139: static const UInt32 kNumThreadsMax = (1 << 12);
1163: struct CBenchThreadsFlusher
1167:   unsigned NumThreads;
1170:   CBenchThreadsFlusher(): NumThreads(0), NeedClose(false) {}
1172:   ~CBenchThreadsFlusher()
1615: static THREAD_FUNC_DECL FreqThreadFunction(void *param)
1631: struct CFreqThreads
1634:   UInt32 NumThreads;
1636:   CFreqThreads(): Items(NULL), NumThreads(0) {}
1643:   ~CFreqThreads()
1675: static THREAD_FUNC_DECL CrcThreadFunction(void *param)
1689: struct CCrcThreads
1692:   UInt32 NumThreads;
1694:   CCrcThreads(): Items(NULL), NumThreads(0) {}
1701:   ~CCrcThreads()
1957: AString GetProcessThreadsInfo(const NSystem::CProcessAffinity &ti)
2512: static UInt32 GetNumThreadsNext(unsigned i, UInt32 numThreads)
36: #include "../../../Windows/Thread.h"
778:       res = thread[0].Create(EncodeThreadFunction, this);
797:     return thread[index].Create(DecodeThreadFunction, &decoder);
1191:     NWindows::CThread &t = EncodersSpec->encoders[i].thread[0];
1339:     // (g_CrcTable[0] == 0), and (encoder.Salt == 0) for first thread
1475:         encoder.thread[j].Wait();
1492:         if (::GetThreadTimes(encoders[i].thread[j], &creationTime, &exitTime, &kernelTime, &userTime) != 0)
1610:     Thread.Wait();
1611:     Thread.Close();
1670:     Thread.Wait();
1671:     Thread.Close();
2260:       RINOK(info.Thread.Create(FreqThreadFunction, &info));
2395:       RINOK(info.Thread.Create(CrcThreadFunction, &info));
391:     ::GetThreadTimes(::GetCurrentThread()
1365:       RINOK(encoder.CreateEncoderThread())
1453:         HRESULT res = encoder.CreateDecoderThread(j, (i == 0 && j == 0)
1516: static inline UInt64 GetLZMAUsage(bool multiThread, UInt32 dictionary)
1529:       (1 << 20) + (multiThread ? (6 << 20) : 0);
2028:     bool size_Defined, UInt64 size, const char *threadsString, UInt32 numThreads)
2047:   f.Print(threadsString);
2243:   CFreqThreads threads;
2246:     threads.Items = new CFreqInfo[numThreads];
2250:       CFreqInfo &info = threads.Items[i];
2259:       CFreqInfo &info = threads.Items[i];
2261:       threads.NumThreads++;
2263:     threads.WaitAll();
2266:       RINOK(threads.Items[i].CallbackRes);
2351:   CCrcThreads threads;
2354:     threads.Items = new CCrcInfo[numThreads];
2359:       CCrcInfo &info = threads.Items[i];
2394:       CCrcInfo &info = threads.Items[i];
2396:       threads.NumThreads++;
2398:     threads.WaitAll();
2401:       RINOK(threads.Items[i].Res);
2894:   NSystem::CProcessAffinity threadsInfo;
2895:   threadsInfo.InitST();
2899:   if (threadsInfo.Get() && threadsInfo.processAffinityMask != 0)
2900:     numCPUs = threadsInfo.GetNumProcessThreads();
3136:     PrintRequirements(*printCallback, "size: ", ramSize_Defined, ramSize, "CPU hardware threads:", numCPUs);
3137:     printCallback->Print(GetProcessThreadsInfo(threadsInfo));
3290:   // ---------- Threads loop ----------
3291:   for (unsigned threadsPassIndex = 0; threadsPassIndex < 3; threadsPassIndex++)
3298:     if (threadsPassIndex != 0)
3304:     if (threadsPassIndex != 0)
3309:       if (threadsPassIndex == 1)
3325:   if (threadsPassIndex > 0)
3355:   PrintRequirements(f, "usage:", true, GetBenchMemoryUsage(numThreads, dict, totalBenchMode), "Benchmark threads:   ", numThreads);
1061:       RINOK(setCoderMt->SetNumberOfThreads(NumDecoderSubThreads));
1161: // ---------- CBenchThreadsFlusher ----------
1181: WRes CBenchThreadsFlusher::StartAndWait(bool exitMode)
1189:   for (unsigned i = 0; i < NumThreads; i++)
1219:         numThreads
1243:   UInt32 numEncoderThreads = 1;
1244:   UInt32 numSubDecoderThreads = 1;
1247:     numEncoderThreads = numThreads;
1251:       if (numThreads == 1 && method.Get_NumThreads() < 0)
1252:         method.AddProp_NumThreads(1);
1253:       const UInt32 numLzmaThreads = method.Get_Lzma_NumThreads();
1254:       if (numThreads > 1 && numLzmaThreads > 1)
1256:         numEncoderThreads = numThreads / 2;
1257:         numSubDecoderThreads = 2;
1261:   bool mtEncMode = (numEncoderThreads > 1);
1264:   CBenchEncoders encodersSpec(numEncoderThreads);
1269:   for (i = 0; i < numEncoderThreads; i++)
1290:     for (UInt32 j = 0; j < numSubDecoderThreads; j++)
1305:   for (i = 0; i < numEncoderThreads; i++)
1321:   CBenchThreadsFlusher encoderFlusher;
1327:     encoderFlusher.NumThreads = numEncoderThreads;
1333:   for (i = 0; i < numEncoderThreads; i++)
1354:       bpi->BenchInfo.NumIterations = numEncoderThreads;
1378:     for (i = 0; i < numEncoderThreads; i++)
1409:   for (i = 0; i < numEncoderThreads; i++)
1427:   UInt32 numDecoderThreads = numEncoderThreads * numSubDecoderThreads;
1429:   for (i = 0; i < numEncoderThreads; i++)
1438:       bpi->BenchInfo.NumIterations = numDecoderThreads;
1446:       int numSubThreads = method.Get_NumThreads();
1447:       encoder.NumDecoderSubThreads = (numSubThreads <= 0) ? 1 : numSubThreads;
1449:     if (numDecoderThreads > 1)
1451:       for (UInt32 j = 0; j < numSubDecoderThreads; j++)
1455:             , ((i * numSubDecoderThreads + j) * 16 * 21) & 0x7FF
1470:   if (numDecoderThreads > 1)
1471:     for (i = 0; i < numEncoderThreads; i++)
1472:       for (UInt32 j = 0; j < numSubDecoderThreads; j++)
1487:   if (numDecoderThreads > 1)
1488:     for (i = 0; i < numEncoderThreads; i++)
1489:       for (UInt32 j = 0; j < numSubDecoderThreads; j++)
1500:   info.NumIterations = numSubDecoderThreads * encoders[0].NumIterations;
1502:   for (i = 0; i < numEncoderThreads; i++)
1532: UInt64 GetBenchMemoryUsage(UInt32 numThreads, UInt32 dictionary, bool totalBench)
1536:   bool lzmaMt = (totalBench || numThreads > 1);
1537:   UInt32 numBigThreads = numThreads;
1539:     numBigThreads /= 2;
1541:     GetLZMAUsage(lzmaMt, dictionary) + (2 << 20)) * numBigThreads;
1639:     for (UInt32 i = 0; i < NumThreads; i++)
1641:     NumThreads = 0;
1697:     for (UInt32 i = 0; i < NumThreads; i++)
1699:     NumThreads = 0;
1960:   // s.Add_UInt32(ti.numProcessThreads);
1963:     // if (ti.numProcessThreads != ti.numSysThreads)
1966:       s.Add_UInt32(ti.GetNumSystemThreads());
2048:   PrintNumber(f, numThreads, 3);
2162:     UInt32 numThreads,
2191:         false, numThreads, method,
2216:     UInt32 numThreads,
2228:   if (numThreads == 0)
2229:     numThreads = 1;
2232:   numThreads = 1;
2244:   if (numThreads > 1)
2248:     for (i = 0; i < numThreads; i++)
2257:     for (i = 0; i < numThreads; i++)
2264:     for (i = 0; i < numThreads; i++)
2292:       UInt64 numCommands = (UInt64)numIterations * bufferSize * numThreads * complexity;
2294:       cpuFreq = rating / numThreads;
2311:     UInt32 numThreads, UInt32 bufferSize,
2320:   if (numThreads == 0)
2321:     numThreads = 1;
2324:   numThreads = 1;
2336:   size_t totalSize = (size_t)bufferSize * numThreads;
2337:   if (totalSize / numThreads != bufferSize)
2352:   if (numThreads > 1)
2357:     for (i = 0; i < numThreads; i++)
2392:     for (i = 0; i < numThreads; i++)
2399:     for (i = 0; i < numThreads; i++)
2428:   UInt64 unpSizeThreads = unpSize * numThreads;
2429:   info.UnpackSize = unpSizeThreads;
2430:   info.PackSize = unpSizeThreads;
2436:       UInt64 numCommands = unpSizeThreads * complexity / 256;
2445:   speed = info.GetSpeed(unpSizeThreads);
2453:     UInt32 numThreads, UInt32 bufSize,
2475:         numThreads, bufSize,
2518:   return (num <= numThreads) ? num : numThreads;
2908:   UInt32 numThreadsSpecified = numCPUs;
2914:   bool multiThreadTests = false;
2997:         multiThreadTests = true;
3001:       RINOK(ParseMtProp(s, propVariant, numCPUs, numThreadsSpecified));
3141:   if (numThreadsSpecified < 1 || numThreadsSpecified > kNumThreadsMax)
3159:         true, numThreadsSpecified,
3217:     unsigned numThreadsTests = 0;
3220:       UInt32 t = GetNumThreadsNext(numThreadsTests, numThreadsSpecified);
3222:       numThreadsTests++;
3223:       if (t >= numThreadsSpecified)
3228:     CTempValues speedTotals(numThreadsTests);
3230:       for (unsigned ti = 0; ti < numThreadsTests; ti++)
3250:         for (unsigned ti = 0; ti < numThreadsTests; ti++)
3253:           UInt32 t = GetNumThreadsNext(ti, numThreadsSpecified);
3271:       for (unsigned ti = 0; ti < numThreadsTests; ti++)
3294:   UInt32 numThreads = numThreadsSpecified;
3296:   if (!multiThreadTests)
3303:     numThreads = 1;
3308:       numThreads = numCPUs;
3312:           numThreads = numCPUs / 2;
3342:       if (GetBenchMemoryUsage(numThreads, ((UInt32)1 << dicSizeLog), totalBenchMode) + (8 << 20) <= ramSize)
3453:         RINOK(FreqBench(complexInCommands, numThreads, printCallback,
3472:             complexInCommands, numThreads,
3480:       res = TotalBench_Hash(EXTERNAL_CODECS_LOC_VARS complexInCommands, numThreads,
3489:         RINOK(FreqBench(complexInCommands, numThreads, printCallback,
3579:           true, numThreads,
android.googlesource.com/platform/external/rust/crates/parking_lot_core:src/parking_lot.rs: [ master, ]
1289:         Thread(*const ThreadData),
1164:         thread_id: usize,
1170:         pub fn thread_id(&self) -> usize {
1191:         thread_id: usize,
151: struct ThreadData {
175: impl ThreadData {
210: impl Drop for ThreadData {
1163:     pub struct DeadlockedThread {
1168:     impl DeadlockedThread {
197: fn with_thread_data<T>(f: impl FnOnce(&ThreadData) -> T) -> T {
43: static NUM_THREADS: AtomicUsize = AtomicUsize::new(0);
498:     pub unparked_threads: usize,
501:     pub requeued_threads: usize,
505:     pub have_more_threads: bool,
1555:         num_threads: usize,
7: use crate::thread_parker::{ThreadParker, ThreadParkerT, UnparkHandleT};
53: // Even with 3x more buckets than threads, the memory overhead per thread is
54: // still only a few hundred bytes per thread.
154:     // Key that this thread is sleeping on. This may change if the thread is
161:     // UnparkToken passed to this thread when it is unparked
164:     // ParkToken value set by the thread when it was parked
167:     // Is the thread parked with a timeout?
195: // Invokes the given closure with a reference to the current thread `ThreadData`.
199:     // to construct. Try to use a thread-local version if possible. Otherwise just
201:     let mut thread_data_storage = None;
202:     thread_local!(static THREAD_DATA: ThreadData = ThreadData::new());
203:     let thread_data_ptr = THREAD_DATA
205:         .unwrap_or_else(|_| thread_data_storage.get_or_insert_with(ThreadData::new));
207:     f(unsafe { &*thread_data_ptr })
239:     // If this fails then it means some other thread created the hash table first.
263: // created, which only happens once per thread.
279:         // Now check if our table is still the latest one. Another thread could
305:     // any other thread trying to grow the hash table is blocked on the bucket
368:         // If no other thread has rehashed the table before we grabbed the lock
433:         // If no other thread has rehashed the table before we grabbed the lock
472:     /// We were unparked by another thread with the given token.
483:     /// Returns true if we were unparked by another thread.
504:     /// true if a thread was unparked.
522:     /// Unpark one thread and requeue the rest onto the target queue.
528:     /// Unpark one thread and leave the rest parked. No requeuing is done.
531:     /// Requeue one thread and leave the rest parked on the original queue.
535: /// Operation that `unpark_filter` should perform for each thread.
538:     /// Unpark the thread and continue scanning the list of parked threads.
541:     /// Don't unpark the thread and continue scanning the list of parked threads.
544:     /// Don't unpark the thread and stop scanning the list of parked threads.
548: /// A value which is passed from an unparker to a parked thread.
552: /// A value associated with a parked thread which can be used by `unpark_filter`.
562: /// Parks the current thread in the queue associated with the given key.
566: /// current thread is appended to the queue and the queue is unlocked.
569: /// the thread is put to sleep. The thread will then sleep until it is unparked
576: /// whether it was the last thread in the queue.
599:     // Grab our thread data, this also ensures that the hash table exists
600:     with_thread_data(|thread_data| {
611:         // Append our thread data to the queue and unlock the bucket
612:         thread_data.parked_with_timeout.set(timeout.is_some());
613:         thread_data.next_in_queue.set(ptr::null());
614:         thread_data.key.store(key, Ordering::Relaxed);
615:         thread_data.park_token.set(park_token);
616:         thread_data.parker.prepare_park();
618:             (*bucket.queue_tail.get()).next_in_queue.set(thread_data);
620:             bucket.queue_head.set(thread_data);
622:         bucket.queue_tail.set(thread_data);
629:         // Park our thread and determine whether we were woken up by an unpark
633:             Some(timeout) => thread_data.parker.park_until(timeout),
635:                 thread_data.parker.park();
637:                 deadlock::on_unpark(thread_data);
644:             return ParkResult::Unparked(thread_data.unpark_token.get());
649:         let (key, bucket) = lock_bucket_checked(&thread_data.key);
653:         if !thread_data.parker.timed_out() {
656:             return ParkResult::Unparked(thread_data.unpark_token.get());
659:         // We timed out, so we now need to remove our thread from the queue
663:         let mut was_last_thread = true;
665:             if current == thread_data {
676:                             was_last_thread = false;
684:                 // last thread on the queue.
685:                 timed_out(key, was_last_thread);
689:                     was_last_thread = false;
697:         // There should be no way for our thread to have been removed from the queue
708: /// Unparks one thread from the queue associated with the given key.
711: /// target thread is woken up. The `UnparkResult` argument to the function
712: /// indicates whether a thread was found in the queue and whether this was the
713: /// last thread in the queue. This value is also returned by `unpark_one`.
716: /// passed to the thread that is unparked. If no thread is unparked then the
739:     // Find a thread with a matching key and remove it from the queue
746:             // Remove the thread from the queue
764:             // Invoke the callback before waking up the thread
769:             // Set the token for the target thread
773:             // the thread from exiting and freeing its ThreadData if its wait
776:             // up the parked thread.
824:             // Remove the thread from the queue
831:             // Set the token for the target thread
871: /// indicating whether a thread was unparked and whether there are threads still
876: /// passed to the thread that is unparked. If no thread is unparked then the
912:     let mut wakeup_thread = None;
915:             // Remove the thread from the queue
922:             // Prepare the first thread for wakeup and requeue the rest.
924:                 && wakeup_thread.is_none()
926:                 wakeup_thread = Some(current);
972:     // Invoke the callback before waking up the thread
979:     if let Some(wakeup_thread) = wakeup_thread {
980:         (*wakeup_thread).unpark_token.set(token);
981:         let handle = (*wakeup_thread).parker.unpark_lock();
995: /// `ParkToken` associated with each thread.
997: /// The `filter` function is called for each thread in the queue or until
999: /// associated with a particular thread, which is unparked if `FilterOp::Unpark`
1008: /// passed to all threads that are unparked. If no thread is unparked then the
1036:             // Call the filter function with the thread's ParkToken
1040:                     // Remove the thread from the queue
1046:                     // Add the thread to our list of threads to unpark
1122:     /// Panics if the resource was already released or wasn't acquired in this thread.
1150:     use super::{get_hashtable, lock_bucket, with_thread_data, ThreadData, NUM_THREADS};
1151:     use crate::thread_parker::{ThreadParkerT, UnparkHandleT};
1160:     use thread_id;
1162:     /// Representation of a deadlocked thread
1169:         /// The system thread id
1171:             self.thread_id
1174:         /// The thread backtrace
1190:         // System thread id
1200:                 thread_id: thread_id::get(),
1210:                     thread_id: td.deadlock_data.thread_id,
1220:             unreachable!("unparked deadlocked thread!");
1225:         with_thread_data(|thread_data| {
1226:             (*thread_data.deadlock_data.resources.get()).push(key);
1231:         with_thread_data(|thread_data| {
1232:             let resources = &mut (*thread_data.deadlock_data.resources.get());
1261:         let thread_count = NUM_THREADS.load(Ordering::Relaxed);
1262:         let mut graph = DiGraphMap::<usize, ()>::with_capacity(thread_count * 2, thread_count * 2);
1296:     // Returns all detected thread wait cycles.
1309:             // Now check if our table is still the latest one. Another thread could
1325:         let thread_count = NUM_THREADS.load(Ordering::Relaxed);
1327:             DiGraphMap::<WaitGraphNode, ()>::with_capacity(thread_count * 2, thread_count * 2);
1337:                         graph.add_edge(Resource(resource), Thread(current), ());
1341:                         Thread(current),
1369:                 // unpark the deadlocked thread!
1400:     // returns all thread cycles in the wait graph
1411:             .filter(|n| if let &Thread(_) = n { true } else { false });
1414:             DfsEvent::Discover(Thread(n), _) => path.push(n),
1415:             DfsEvent::Finish(Thread(_), _) => {
1418:             DfsEvent::BackEdge(_, Thread(n)) => {
1438:         thread,
1529:                 threads.push(thread::spawn(move || test.run()));
1535:             thread::sleep(delay);
1543:             for thread in threads {
1544:                 thread.join().expect("Test thread panic");
1552:         /// Holds the pointer to the last *unprocessed* woken up thread.
1573:             // Report back to the test verification code that this thread woke up
1574:             let this_thread_ptr = super::with_thread_data(|t| t as *const _ as *mut _);
1575:             self.last_awoken.store(this_thread_ptr, Ordering::SeqCst);
1585:             for_each(self.semaphore_addr(), |thread_data| {
1586:                 queue.push(thread_data as *const _ as *mut _);
1594:             // Wait for a parked thread to wake up and update num_awake + last_awoken.
1596:                 thread::yield_now();
1599:             // At this point the other thread should have set last_awoken inside the run() method
1604:                     "Woke up wrong thread:\n\tqueue: {:?}\n\tlast awoken: {:?}",
1619:                 for_each(self.semaphore_addr(), |_thread_data| {
1635:                     thread::yield_now()
1643:             // Make sure no thread is parked on our semaphore address
1645:             for_each(self.semaphore_addr(), |_thread_data| {
1681:                 // the thread we want to pass ownership to has decremented the semaphore counter,
70:     fn new(num_threads: usize, prev: *const HashTable) -> Box<HashTable> {
71:         let new_size = (num_threads * LOAD_FACTOR).next_power_of_two();
94:     // Linked list of threads waiting on this bucket
95:     queue_head: Cell<*const ThreadData>,
96:     queue_tail: Cell<*const ThreadData>,
152:     parker: ThreadParker,
158:     // Linked list of parked threads in a bucket
159:     next_in_queue: Cell<*const ThreadData>,
176:     fn new() -> ThreadData {
177:         // Keep track of the total number of live ThreadData objects and resize
179:         let num_threads = NUM_THREADS.fetch_add(1, Ordering::Relaxed) + 1;
180:         grow_hashtable(num_threads);
182:         ThreadData {
183:             parker: ThreadParker::new(),
198:     // Unlike word_lock::ThreadData, parking_lot::ThreadData is always expensive
200:     // create a ThreadData on the stack
204:         .try_with(|x| x as *const ThreadData)
212:         NUM_THREADS.fetch_sub(1, Ordering::Relaxed);
261: // Grow the hash table so that it is big enough for the given number of threads.
262: // This isn't performance-critical since it is only done when a ThreadData is
264: fn grow_hashtable(num_threads: usize) {
270:         if table.entries.len() >= LOAD_FACTOR * num_threads {
294:     let mut new_table = HashTable::new(num_threads, old_table);
299:         // lists. All `ThreadData` instances in these lists will remain valid as long as they are
300:         // present in the lists, meaning as long as their threads are parked.
316: /// Iterate through all `ThreadData` objects in the bucket and insert them into the given table
322: /// `ThreadData` instances that must stay valid at least as long as the given `table` is in use.
326:     let mut current: *const ThreadData = bucket.queue_head.get();
497:     /// The number of threads that were unparked.
500:     /// The number of threads that were requeued.
503:     /// Whether there are any threads remaining in the queue. This only returns
525:     /// Requeue all threads onto the target queue.
757:                         result.have_more_threads = true;
765:             result.unparked_threads = 1;
772:             // This is a bit tricky: we first lock the ThreadParker to prevent
790:     // No threads with a matching key were found in the bucket
797: /// Unparks all threads in the queue associated with the given key.
799: /// The given `UnparkToken` is passed to all unparked threads.
801: /// This function returns the number of threads that were unparked.
817:     // Remove all threads with the given key in the bucket
821:     let mut threads = SmallVec::<[_; 8]>::new();
834:             // Don't wake up threads while holding the queue lock. See comment
835:             // in unpark_one. For now just record which threads we need to wake
837:             threads.push((*current).parker.unpark_lock());
850:     // Now that we are outside the lock, wake up all the threads that we removed
852:     let num_threads = threads.len();
853:     for handle in threads.into_iter() {
857:     num_threads
860: /// Removes all threads from the queue associated with `key_from`, optionally
906:     // Remove all threads with the given key in the source bucket
910:     let mut requeue_threads: *const ThreadData = ptr::null();
911:     let mut requeue_threads_tail: *const ThreadData = ptr::null();
927:                 result.unparked_threads = 1;
929:                 if !requeue_threads.is_null() {
930:                     (*requeue_threads_tail).next_in_queue.set(current);
932:                     requeue_threads = current;
934:                 requeue_threads_tail = current;
936:                 result.requeued_threads += 1;
944:                         result.have_more_threads = true;
959:     // Add the requeued threads to the destination bucket
960:     if !requeue_threads.is_null() {
961:         (*requeue_threads_tail).next_in_queue.set(ptr::null());
965:                 .set(requeue_threads);
967:             bucket_to.queue_head.set(requeue_threads);
969:         bucket_to.queue_tail.set(requeue_threads_tail);
973:     if result.unparked_threads != 0 {
993: /// Unparks a number of threads from the front of the queue associated with
1003: /// passed an `UnparkResult` indicating the number of threads that were unparked
1004: /// and whether there are still parked threads in the queue. This `UnparkResult`
1028:     // Go through the queue looking for threads with a matching key
1032:     let mut threads = SmallVec::<[_; 8]>::new();
1047:                     threads.push((current, None));
1052:                     result.have_more_threads = true;
1058:                     result.have_more_threads = true;
1069:     // Invoke the callback before waking up the threads
1070:     result.unparked_threads = threads.len();
1071:     if result.unparked_threads != 0 {
1076:     // Pass the token to all threads that are going to be unparked and prepare
1078:     for t in threads.iter_mut() {
1086:     // Now that we are outside the lock, wake up all the threads that we removed
1088:     for (_, handle) in threads.into_iter() {
1134:     /// Each cycle consists of a vector of `DeadlockedThread`.
1137:     pub fn check_deadlock() -> Vec<Vec<deadlock_impl::DeadlockedThread>> {
1142:     pub(super) unsafe fn on_unpark(_td: &super::ThreadData) {
1188:         backtrace_sender: UnsafeCell<Option<mpsc::Sender<DeadlockedThread>>>,
1205:     pub(super) unsafe fn on_unpark(td: &ThreadData) {
1209:                 .send(DeadlockedThread {
1236:             // ThreadData has already been freed. There isn't much we can do
1244:     pub fn check_deadlock() -> Vec<Vec<DeadlockedThread>> {
1256:     // Simple algorithm that builds a wait graph of the threads and the resources,
1298:     unsafe fn check_wait_graph_slow() -> Vec<Vec<DeadlockedThread>> {
1401:     fn graph_cycles(g: &DiGraphMap<WaitGraphNode, ()>) -> Vec<Vec<*const ThreadData>> {
1408:         // start from threads to get the correct threads cycle
1409:         let threads = g
1413:         depth_first_search(g, threads, |e| match e {
1431:     use super::{ThreadData, DEFAULT_PARK_TOKEN, DEFAULT_UNPARK_TOKEN};
1442:     /// Calls a closure for every `ThreadData` currently parked on a given key
1443:     fn for_each(key: usize, mut f: impl FnMut(&ThreadData)) {
1446:         let mut current: *const ThreadData = bucket.queue_head.get();
1464:             threads: $threads:expr,
1471:                     run_parking_test($latches, delay, $threads, $single_unparks);
1479:             repeats: 10000, latches: 1, delay: 0, threads: 1, single_unparks: 0
1482:             repeats: 100, latches: 1, delay: 0, threads: 100, single_unparks: 0
1485:             repeats: 1000, latches: 1, delay: 0, threads: 1, single_unparks: 1
1488:             repeats: 20, latches: 1, delay: 0, threads: 100, single_unparks: 100
1491:             repeats: 50, latches: 1, delay: 0, threads: 100, single_unparks: 50
1494:             repeats: 100, latches: 1, delay: 10000, threads: 1, single_unparks: 0
1497:             repeats: 100, latches: 1, delay: 10000, threads: 100, single_unparks: 0
1500:             repeats: 10, latches: 1, delay: 10000, threads: 1, single_unparks: 1
1503:             repeats: 1, latches: 1, delay: 10000, threads: 50, single_unparks: 50
1506:             repeats: 2, latches: 1, delay: 10000, threads: 100, single_unparks: 50
1509:             repeats: 100, latches: 100, delay: 0, threads: 1, single_unparks: 0
1512:             repeats: 1, latches: 100, delay: 10000, threads: 1, single_unparks: 0
1519:         num_threads: usize,
1525:             let test = Arc::new(SingleLatchTest::new(num_threads));
1526:             let mut threads = Vec::with_capacity(num_threads);
1527:             for _ in 0..num_threads {
1531:             tests.push((test, threads));
1541:         for (test, threads) in tests {
1553:         last_awoken: AtomicPtr<ThreadData>,
1554:         /// Total number of threads participating in this test.
1559:         pub fn new(num_threads: usize) -> Self {
1565:                 num_threads,
1584:             let mut queue: Vec<*mut ThreadData> = Vec::with_capacity(self.num_threads);
1588:             assert!(queue.len() <= self.num_threads - single_unpark_index);
1612:             // The number of threads not unparked via unpark_one
1613:             let mut num_threads_left = self.num_threads.checked_sub(num_single_unparks).unwrap();
1615:             // Wake remaining threads up with unpark_all. Has to be in a loop, because there might
1616:             // still be threads that have not yet parked.
1617:             while num_threads_left > 0 {
1622:                 assert!(num_waiting_on_address <= num_threads_left);
1629:                 assert!(num_unparked <= num_threads_left);
1631:                 // Wait for all unparked threads to wake up and update num_awake + last_awoken.
1638:                 num_threads_left = num_threads_left.checked_sub(num_unparked).unwrap();
1640:             // By now, all threads should have been woken up
1641:             assert_eq!(self.num_awake.load(Ordering::SeqCst), self.num_threads);
1686:                             .unparked_threads
1690:                         i => panic!("Should not wake up {} threads", i),
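The parking_lot_core hits above cover the ThreadData hash table, the unpark_one/unpark_all/unpark_requeue/unpark_filter entry points and the deadlock detector. As a hedged usage sketch only: a minimal one-shot event built on the crate's public park/unpark_all API, assuming the signatures documented for parking_lot_core (the Event type itself is illustrative, not part of the crate, and the exact parameter order should be checked against the crate version in use).

use std::sync::atomic::{AtomicBool, Ordering};

use parking_lot_core::{park, unpark_all, DEFAULT_PARK_TOKEN, DEFAULT_UNPARK_TOKEN};

// Hypothetical one-shot event, built on the parking lot documented above.
struct Event {
    set: AtomicBool,
}

impl Event {
    const fn new() -> Event {
        Event { set: AtomicBool::new(false) }
    }

    fn wait(&self) {
        while !self.set.load(Ordering::Acquire) {
            // The queue key is just the address of this Event.
            let key = self as *const Event as usize;
            let _ = unsafe {
                park(
                    key,
                    // `validate` runs under the bucket lock and re-checks the
                    // flag, so a concurrent signal() cannot be missed.
                    || !self.set.load(Ordering::Acquire),
                    || {},     // before_sleep: nothing to do
                    |_, _| {}, // timed_out: unused, no timeout below
                    DEFAULT_PARK_TOKEN,
                    None, // park without a timeout
                )
            };
        }
    }

    fn signal(&self) {
        self.set.store(true, Ordering::Release);
        let key = self as *const Event as usize;
        // Wake everything queued on this key; the return value is the
        // number of threads that were unparked.
        let _ = unsafe { unpark_all(key, DEFAULT_UNPARK_TOKEN) };
    }
}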
gerrit.googlesource.com/plugins/image-diff:package-lock.json: [ master, ]
1435:                 "needle": "^2.2.1",
1410:         "needle": {
1412:             "resolved": "https://registry.npmjs.org/needle/-/needle-2.5.0.tgz",
android.googlesource.com/platform/test/dittosuite:schema/benchmark.proto: [ master, ]
90: message Thread {
96:   repeated Thread threads = 1;
95: message Multithreading {
113:     Multithreading multithreading = 11;
android.googlesource.com/platform/external/opencv3:samples/gpu/stereo_multi.cpp: [ master, ]
31: class Thread
53:     Thread(void (*func)(void* userData), void* userData)
78: class Thread
99:     Thread(void (*func)(void* userData), void* userData)
49:     HANDLE thread_;
67:     ~Thread()
107:     ~Thread()
50:     DWORD threadId_;
95:     pthread_t thread_;
167: class StereoMultiGpuThread
196: StereoMultiGpuThread::StereoMultiGpuThread()
205: StereoMultiGpuThread::~StereoMultiGpuThread()
39:     static DWORD WINAPI WinThreadFunction(LPVOID lpParam)
86:     static void* PThreadFunction(void* lpParam)
27: // Thread
58:         thread_ = CreateThread(
61:             WinThreadFunction,      // thread function name
62:             &userData_,             // argument to thread function
64:             &threadId_);            // returns the thread identifier
69:         CloseHandle(thread_);
74:         WaitForSingleObject(thread_, INFINITE);
104:         pthread_create(&thread_, NULL, PThreadFunction, &userData_);
109:         pthread_detach(thread_);
114:         pthread_join(thread_, NULL);
249:     Thread thread0(launchGpuStereoAlg, &launchDatas[0]);
250:     Thread thread1(launchGpuStereoAlg, &launchDatas[1]);
252:     thread0.wait();
253:     thread1.wait();
273: // Run Stereo algorithm on two GPUs from single host thread using async API
426:     cout << "| Frame | GPU 0 ms | GPU 1 ms | Multi Thread ms | Multi Stream ms |" << endl;
8:     #include <pthread.h>
28: // OS-specific wrappers for multi-threading
164: // StereoMultiGpuThread
165: // Run Stereo algorithm on two GPUs using different host threads
170:     StereoMultiGpuThread();
171:     ~StereoMultiGpuThread();
220: void StereoMultiGpuThread::compute(const Mat& leftFrame, const Mat& rightFrame, Mat& disparity)
256: void StereoMultiGpuThread::launchGpuStereoAlg(void* userData)
410:     StereoMultiGpuThread multiThreadAlg;
415:     Mat disparityMultiThread;
465:                                disparityMultiThread);
484:         resize(disparityMultiThread, disparityMultiThreadShow, Size(1024, 768), 0, 0, INTER_AREA);
489:         imshow("disparityMultiThread", disparityMultiThreadShow);
420:     Mat disparityMultiThreadShow;
464:         multiThreadAlg.compute(leftGrayFrame.createMatHeader(), rightGrayFrame.createMatHeader(),
468:         const double multiThreadTime = tm.getTimeMilli();
479:              << setw(15) << setprecision(1) << fixed << multiThreadTime << " | "
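stereo_multi.cpp's Thread class above is only a portability shim: construct with a function pointer plus user data (CreateThread on Win32, pthread_create elsewhere) and wait() to join. A rough Rust analogue, purely illustrative and not tied to the OpenCV sample (the Thread wrapper, launch and the GPU ids below are made up for this sketch):

use std::thread::{self, JoinHandle};

// Thin wrapper mirroring the C++ sample: run `func(user_data)` on a new
// thread, then `wait()` to join it.
struct Thread {
    handle: Option<JoinHandle<()>>,
}

impl Thread {
    fn new<T: Send + 'static>(func: fn(T), user_data: T) -> Thread {
        Thread { handle: Some(thread::spawn(move || func(user_data))) }
    }

    fn wait(&mut self) {
        if let Some(h) = self.handle.take() {
            let _ = h.join();
        }
    }
}

fn launch(gpu_id: usize) {
    println!("running stereo algorithm on GPU {}", gpu_id);
}

fn main() {
    // Mirrors: Thread thread0(launchGpuStereoAlg, &launchDatas[0]); thread0.wait();
    let mut t0 = Thread::new(launch, 0);
    let mut t1 = Thread::new(launch, 1);
    t0.wait();
    t1.wait();
}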
android.googlesource.com/platform/external/pthreads:implement.h: [ master, ]
130:   DWORD thread;
444:   ptw32_thread_t * thread;
131:   HANDLE threadH;		/* Win32 thread handle - POSIX thread is invalid if threadH == 0 */
139:   pthread_mutex_t threadLock;	/* Used for serialised access to public thread state */
210:   pthread_t ownerThread;
271:   void *threads;
275: typedef struct ThreadParms ThreadParms;
276: typedef struct ThreadKeyAssoc ThreadKeyAssoc;
278: struct ThreadParms
344: struct ThreadKeyAssoc
447:   ThreadKeyAssoc *nextThread;
449:   ThreadKeyAssoc *prevThread;
123: typedef struct ptw32_thread_t_ ptw32_thread_t;
125: struct ptw32_thread_t_
522: #define PTW32_THREAD_REUSE_EMPTY ((ptw32_thread_t *) 1)
162: struct pthread_attr_t_
198: struct pthread_mutex_t_
215: struct pthread_mutexattr_t_
242: struct pthread_spinlock_t_
252: struct pthread_barrier_t_
261: struct pthread_barrierattr_t_
266: struct pthread_key_t_
286: struct pthread_cond_t_
304: struct pthread_condattr_t_
311: struct pthread_rwlock_t_
322: struct pthread_rwlockattr_t_
108:   PThreadStateInitial = 0,	/* Thread not running                   */
109:   PThreadStateRunning,		/* Thread alive & kicking               */
110:   PThreadStateSuspended,	/* Thread alive but suspended           */
111:   PThreadStateCancelPending,	/* Thread alive but is                  */
113:   PThreadStateCanceling,	/* Thread alive but is                  */
116:   PThreadStateException,	/* Thread alive but exiting             */
118:   PThreadStateLast
120: PThreadState;
696: #define _beginthreadex(security, \
709: #define _endthreadex ExitThread
104:    * This enumeration represents the state of the thread;
105:    * The thread is still "alive" if the numeric value of the
132:   pthread_t ptHandle;		/* This thread's permanent pthread_t handle */
133:   ptw32_thread_t * prevReuse;	/* Links threads on reuse stack */
206:   int recursive_count;		/* Number of unlocks a thread needs to perform
348:    *      This structure creates an association between a thread and a key.
350:    *      destroy routine for thread specific data registered by a user upon
351:    *      exiting a thread.
357:    *         T - Thread that has called pthread_setspecific(Kn)
358:    *            (head of chain is thread->keys)
377:    *      general lock (guarding the row) and the thread's general
381:    *      be released - both the key must be deleted and the thread
383:    *      allows the resources to be freed as soon as either thread or
387:    *      and thread locks are always acquired in the order: key lock
388:    *      then thread lock. An exception to this exists when a thread
392:    *      An association is created when a thread first calls
397:    *      thread calls the key destructor function on thread exit, or
401:    *      thread
402:    *              reference to the thread that owns the
404:    *              thread struct itself. Since the association is
405:    *              destroyed before the thread exits, this can never
406:    *              point to a different logical thread to the one that
407:    *              created the assoc, i.e. after thread struct reuse.
434:    *      1)      As soon as either the key or the thread is no longer
521: /* Thread Reuse stack bottom marker. Must not be NULL or any valid pointer to memory. */
525: extern ptw32_thread_t * ptw32_threadReuseTop;
526: extern ptw32_thread_t * ptw32_threadReuseBottom;
540: extern CRITICAL_SECTION ptw32_thread_reuse_lock;
595:   void ptw32_threadReusePush (pthread_t thread);
599:   int ptw32_setthreadpriority (pthread_t thread, int policy, int priority);
610:   void ptw32_callUserDestroyRoutines (pthread_t thread);
612:   int ptw32_tkAssocCreate (ptw32_thread_t * thread, pthread_key_t key);
6:  * Keeps all the internals out of pthread.h
10:  *      Pthreads-win32 - POSIX Threads Library for Win32
141:   pthread_mutex_t cancelLock;	/* Used for async-cancel safety */
188:   pthread_mutex_t lock;
212: 				   threads. */
248:     pthread_mutex_t mutex;	/* mutex if single cpu.            */
270:   pthread_mutex_t keyLock;
280:   pthread_t tid;
288:   long nWaitersBlocked;		/* Number of threads blocked            */
289:   long nWaitersGone;		/* Number of threads timed out          */
290:   long nWaitersToUnblock;	/* Number of threads to unblock         */
291:   sem_t semBlockQueue;		/* Queue up threads waiting for the     */
296:   pthread_mutex_t mtxUnblockLock;	/* Mutex that guards access to          */
299:   pthread_cond_t next;		/* Doubly linked list                   */
300:   pthread_cond_t prev;
313:   pthread_mutex_t mtxExclusiveAccess;
314:   pthread_mutex_t mtxSharedAccessCompleted;
315:   pthread_cond_t cndSharedAccessCompleted;
356:    *            (head of chain is key->threads)
393:    *      pthread_setspecific() on a key that has a specified
413:    *              The pthread_t->keys attribute is the head of a
416:    *              between a pthread_t and all pthread_key_t on which
417:    *              it called pthread_setspecific.
422:    *      nextThread
423:    *              The pthread_key_t->threads attribute is the head of
426:    *              relationship between a pthread_key_t and all the 
427:    *              PThreads that have called pthread_setspecific for
428:    *              this pthread_key_t.
430:    *      prevThread
439:    *              pthread_setspecific if the user provided a
445:   pthread_key_t key;
446:   ThreadKeyAssoc *nextKey;
448:   ThreadKeyAssoc *prevKey;
518: /* Declared in pthread_cancel.c */
527: extern pthread_key_t ptw32_selfThreadKey;
528: extern pthread_key_t ptw32_cleanupKey;
529: extern pthread_cond_t ptw32_cond_list_head;
530: extern pthread_cond_t ptw32_cond_list_tail;
548: extern int pthread_count;
564:   int ptw32_is_attr (const pthread_attr_t * attr);
566:   int ptw32_cond_check_need_init (pthread_cond_t * cond);
567:   int ptw32_mutex_check_need_init (pthread_mutex_t * mutex);
568:   int ptw32_rwlock_check_need_init (pthread_rwlock_t * rwlock);
581: 			       HANDLE threadH, DWORD callback_arg);
587:   void ptw32_threadDestroy (pthread_t tid);
591:   pthread_t ptw32_new (void);
593:   pthread_t ptw32_threadReusePop (void);
608:     ptw32_threadStart (void *vthreadParms);
614:   void ptw32_tkAssocDestroy (ThreadKeyAssoc * assoc);
649:   _CRTIMP unsigned long __cdecl _beginthread (void (__cdecl *) (void *),
651:   _CRTIMP void __cdecl _endthread (void);
689: #if defined(__CYGWIN32__) || defined(__CYGWIN__) || defined(NEED_CREATETHREAD)
692:  * Macro uses args so we can cast start_proc to LPTHREAD_START_ROUTINE
702:         CreateThread(security, \
704:                      (LPTHREAD_START_ROUTINE) start_proc, \
711: #endif				/* __CYGWIN32__ || __CYGWIN__ || NEED_CREATETHREAD */
12:  *      Copyright(C) 1999,2005 Pthreads-win32 contributors
20:  *      http://sources.redhat.com/pthreads-win32/contributors.html
106:    * state is greater or equal "PThreadStateRunning".
134:   volatile PThreadState state;
425:    *              nextThreads link. This chain provides the 1 to many
652:   _CRTIMP unsigned long __cdecl _beginthreadex (void *, unsigned,
655:   _CRTIMP void __cdecl _endthreadex (unsigned);
685:  * Question 1 - How do I get pthreads-win32 to link under Cygwin or Mingw32?
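The ThreadKeyAssoc comments in implement.h describe an association between one thread and one key that is linked into two chains at once, one rooted at the thread (thread->keys, via nextKey/prevKey) and one rooted at the key (key->threads, via nextThread/prevThread), and that is torn down as soon as either the key is deleted or the thread exits. A conceptual Rust sketch of that bookkeeping, using hash maps in place of the intrusive lists (TsdTable and its method names are illustrative, not pthreads-win32 API):

use std::collections::{HashMap, HashSet};

#[derive(Clone, Copy, PartialEq, Eq, Hash)]
struct ThreadId(u64);
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
struct KeyId(u64);

#[derive(Default)]
struct TsdTable {
    // Value stored per (thread, key), as set by pthread_setspecific.
    values: HashMap<(ThreadId, KeyId), usize>,
    // Chain rooted at the thread: which keys this thread has used.
    keys_of_thread: HashMap<ThreadId, HashSet<KeyId>>,
    // Chain rooted at the key: which threads have used this key.
    threads_of_key: HashMap<KeyId, HashSet<ThreadId>>,
}

impl TsdTable {
    fn set_specific(&mut self, t: ThreadId, k: KeyId, value: usize) {
        self.values.insert((t, k), value);
        self.keys_of_thread.entry(t).or_default().insert(k);
        self.threads_of_key.entry(k).or_default().insert(t);
    }

    // On thread exit: drop every association this thread participates in,
    // updating the per-key chains as well.
    fn thread_exit(&mut self, t: ThreadId) {
        if let Some(keys) = self.keys_of_thread.remove(&t) {
            for k in keys {
                self.values.remove(&(t, k));
                if let Some(ts) = self.threads_of_key.get_mut(&k) {
                    ts.remove(&t);
                }
            }
        }
    }

    // On key deletion: drop the association from every thread that used it.
    fn key_delete(&mut self, k: KeyId) {
        if let Some(threads) = self.threads_of_key.remove(&k) {
            for t in threads {
                self.values.remove(&(t, k));
                if let Some(ks) = self.keys_of_thread.get_mut(&t) {
                    ks.remove(&k);
                }
            }
        }
    }
}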
android.googlesource.com/platform/external/rust/crates/memchr:src/memmem/mod.rs: [ master, ]
520:     pub fn needle(&self) -> &[u8] {
651:     pub fn needle(&self) -> &[u8] {
712:     needle: CowBytes<'n>,
836:     fn needle(&self) -> &[u8] {
1006:     needle: CowBytes<'n>,
1045:     fn needle(&self) -> &[u8] {
730: pub(crate) struct NeedleInfo {
987: impl NeedleInfo {
7: the empty needle, the standard library reports matches only at valid UTF-8
51: # Example: repeating a search for the same needle
54: measurable in some workloads. In cases where the same needle is used to search
103:                 needle: Vec<u8>
105:                 proptests::matches_naive(false, &haystack, &needle, $fwd)
118:                 needle: Vec<u8>
120:                 proptests::matches_naive(true, &haystack, &needle, $rev)
167: /// with respect to both the needle and the haystack. That is, this runs
168: /// in `O(needle.len() + haystack.len())` time.
190:     needle: &'n N,
192:     FindIter::new(haystack, Finder::new(needle))
201: /// with respect to both the needle and the haystack. That is, this runs
202: /// in `O(needle.len() + haystack.len())` time.
224:     needle: &'n N,
226:     FindRevIter::new(haystack, FinderRev::new(needle))
229: /// Returns the index of the first occurrence of the given needle.
231: /// Note that if you are searching for the same needle in many different
238: /// with respect to both the needle and the haystack. That is, this runs
239: /// in `O(needle.len() + haystack.len())` time.
257: pub fn find(haystack: &[u8], needle: &[u8]) -> Option<usize> {
259:         rabinkarp::find(haystack, needle)
261:         Finder::new(needle).find(haystack)
265: /// Returns the index of the last occurrence of the given needle.
267: /// Note that if you are searching for the same needle in many different
274: /// with respect to both the needle and the haystack. That is, this runs
275: /// in `O(needle.len() + haystack.len())` time.
294: pub fn rfind(haystack: &[u8], needle: &[u8]) -> Option<usize> {
296:         rabinkarp::rfind(haystack, needle)
298:         FinderRev::new(needle).rfind(haystack)
307: /// needle.
342:                 self.pos = pos + core::cmp::max(1, self.finder.needle().len());
354: /// needle.
359:     /// When searching with an empty needle, this gets set to `None` after
398: /// A single substring searcher fixed to a particular needle.
403: /// concern when it's necessary to re-use the same needle to search multiple
410: /// the lifetime of its needle.
417:     /// Create a new finder for the given needle.
419:     pub fn new<B: ?Sized + AsRef<[u8]>>(needle: &'n B) -> Finder<'n> {
420:         FinderBuilder::new().build_forward(needle)
423:     /// Returns the index of the first occurrence of this needle in the given
429:     /// with respect to both the needle and the haystack. That is, this runs
430:     /// in `O(needle.len() + haystack.len())` time.
456:     /// with respect to both the needle and the haystack. That is, this runs
457:     /// in `O(needle.len() + haystack.len())` time.
486:     /// borrows the needle.
489:     /// this copies the needle.
505:     /// needle itself. Namely, a finder's needle can be either borrowed or
506:     /// owned, so the lifetime of the needle returned must necessarily be the
513:     /// Returns the needle that this finder searches for.
515:     /// Note that the lifetime of the needle returned is tied to the lifetime
517:     /// finder's needle can be either borrowed or owned, so the lifetime of the
518:     /// needle returned must necessarily be the shorter of the two.
521:         self.searcher.needle()
525: /// A single substring reverse searcher fixed to a particular needle.
530: /// concern when it's necessary to re-use the same needle to search multiple
537: /// the lifetime of its needle.
544:     /// Create a new reverse finder for the given needle.
546:     pub fn new<B: ?Sized + AsRef<[u8]>>(needle: &'n B) -> FinderRev<'n> {
547:         FinderBuilder::new().build_reverse(needle)
550:     /// Returns the index of the last occurrence of this needle in the given
559:     /// with respect to both the needle and the haystack. That is, this runs
560:     /// in `O(needle.len() + haystack.len())` time.
587:     /// with respect to both the needle and the haystack. That is, this runs
588:     /// in `O(needle.len() + haystack.len())` time.
617:     /// borrows the needle.
620:     /// this copies the needle.
636:     /// needle itself. Namely, a finder's needle can be either borrowed or
637:     /// owned, so the lifetime of the needle returned must necessarily be the
644:     /// Returns the needle that this finder searches for.
646:     /// Note that the lifetime of the needle returned is tied to the lifetime
648:     /// finder's needle can be either borrowed or owned, so the lifetime of the
649:     /// needle returned must necessarily be the shorter of the two.
652:         self.searcher.needle()
672:     /// Build a forward finder using the given needle from the current
676:         needle: &'n B,
678:         Finder { searcher: Searcher::new(self.config, needle.as_ref()) }
681:     /// Build a reverse finder using the given needle from the current
685:         needle: &'n B,
687:         FinderRev { searcher: SearcherRev::new(needle.as_ref()) }
703: /// variety of parameters (CPU support, target, needle size, haystack size and
708:     /// The actual needle we're searching for.
713:     /// A collection of facts computed on the needle that are useful for more
725: /// A collection of facts computed about a search needle.
731:     /// The offsets of "rare" bytes detected in the needle.
735:     /// one or two bytes. If we pick bytes from the needle that occur
742:     /// A Rabin-Karp hash of the needle.
764:     /// A special case for empty needles. An empty needle always matches, even
767:     /// This is used whenever the needle is a single byte. In this case, we
771:     /// linear time guarantee. In general, it's used when the needle is bigger
782:     fn new(config: SearcherConfig, needle: &'n [u8]) -> Searcher<'n> {
785:         let ninfo = NeedleInfo::new(needle);
787:             prefilter::forward(&config.prefilter, &ninfo.rarebytes, needle);
788:         let kind = if needle.len() == 0 {
790:         } else if needle.len() == 1 {
791:             OneByte(needle[0])
792:         } else if let Some(fwd) = x86::avx::Forward::new(&ninfo, needle) {
794:         } else if let Some(fwd) = x86::sse::Forward::new(&ninfo, needle) {
797:             TwoWay(twoway::Forward::new(needle))
799:         Searcher { needle: CowBytes::new(needle), ninfo, prefn, kind }
803:     fn new(config: SearcherConfig, needle: &'n [u8]) -> Searcher<'n> {
806:         let ninfo = NeedleInfo::new(needle);
808:             prefilter::forward(&config.prefilter, &ninfo.rarebytes, needle);
809:         let kind = if needle.len() == 0 {
811:         } else if needle.len() == 1 {
812:             OneByte(needle[0])
814:             TwoWay(twoway::Forward::new(needle))
816:         Searcher { needle: CowBytes::new(needle), ninfo, prefn, kind }
837:         self.needle.as_slice()
861:             needle: CowBytes::new(self.needle()),
890:             needle: self.needle.into_owned(),
908:         let needle = self.needle();
909:         if haystack.len() < needle.len() {
918:                 if rabinkarp::is_fast(haystack, needle) {
919:                     rabinkarp::find_with(&self.ninfo.nhash, haystack, needle)
921:                     self.find_tw(tw, state, haystack, needle)
933:                     rabinkarp::find_with(&self.ninfo.nhash, haystack, needle)
935:                     gs.find(haystack, needle)
947:                     rabinkarp::find_with(&self.ninfo.nhash, haystack, needle)
949:                     gs.find(haystack, needle)
955:     /// Calls Two-Way on the given haystack/needle.
970:         needle: &[u8],
980:                 return tw.find(Some(&mut pre), haystack, needle);
983:         tw.find(None, haystack, needle)
988:     pub(crate) fn new(needle: &[u8]) -> NeedleInfo {
990:             rarebytes: RareNeedleBytes::forward(needle),
991:             nhash: NeedleHash::forward(needle),
1005:     /// The actual needle we're searching for.
1007:     /// A Rabin-Karp hash of the needle.
1015:     /// A special case for empty needles. An empty needle always matches, even
1018:     /// This is used whenever the needle is a single byte. In this case, we
1022:     /// linear time guarantee. In general, it's used when the needle is bigger
1028:     fn new(needle: &'n [u8]) -> SearcherRev<'n> {
1031:         let kind = if needle.len() == 0 {
1033:         } else if needle.len() == 1 {
1034:             OneByte(needle[0])
1036:             TwoWay(twoway::Reverse::new(needle))
1039:             needle: CowBytes::new(needle),
1040:             nhash: NeedleHash::reverse(needle),
1046:         self.needle.as_slice()
1058:             needle: CowBytes::new(self.needle()),
1074:             needle: self.needle.into_owned(),
1087:         let needle = self.needle();
1088:         if haystack.len() < needle.len() {
1097:                 if rabinkarp::is_fast(haystack, needle) {
1098:                     rabinkarp::rfind_with(&self.nhash, haystack, needle)
1100:                     tw.rfind(haystack, needle)
1164:         needle: &[u8],
1168:             naive_rfind(haystack, needle) == search(haystack, needle)
1170:             naive_find(haystack, needle) == search(haystack, needle)
1174:     /// Naively search forwards for the given needle in the given haystack.
1175:     fn naive_find(haystack: &[u8], needle: &[u8]) -> Option<usize> {
1176:         if needle.is_empty() {
1178:         } else if haystack.len() < needle.len() {
1181:         for i in 0..(haystack.len() - needle.len() + 1) {
1182:             if needle == &haystack[i..i + needle.len()] {
1189:     /// Naively search in reverse for the given needle in the given haystack.
1190:     fn naive_rfind(haystack: &[u8], needle: &[u8]) -> Option<usize> {
1191:         if needle.is_empty() {
1193:         } else if haystack.len() < needle.len() {
1196:         for i in (0..(haystack.len() - needle.len() + 1)).rev() {
1197:             if needle == &haystack[i..i + needle.len()] {
1212:     /// Each test is a (needle, haystack, expected_fwd, expected_rev) tuple.
1258:     /// accepts a haystack and a needle and returns the starting position
1259:     /// of the first occurrence of needle in the haystack, or `None` if one
1264:         for &(needle, haystack, expected_fwd, _) in SEARCH_TESTS {
1265:             let (n, h) = (needle.as_bytes(), haystack.as_bytes());
1269:                 "needle: {:?}, haystack: {:?}, expected: {:?}",
1278:     /// accepts a haystack and a needle and returns the starting position of
1279:     /// the last occurrence of needle in the haystack, or `None` if one doesn't
1284:         for &(needle, haystack, _, expected_rev) in SEARCH_TESTS {
1285:             let (n, h) = (needle.as_bytes(), haystack.as_bytes());
1289:                 "needle: {:?}, haystack: {:?}, expected: {:?}",
5: arbitrary bytes. For all non-empty needles, these routines will report exactly
75:         rabinkarp::NeedleHash,
715:     ninfo: NeedleInfo,
751:     pub(crate) nhash: NeedleHash,
989:         NeedleInfo {
1008:     nhash: NeedleHash,
76:         rarebytes::RareNeedleBytes,
741:     pub(crate) rarebytes: RareNeedleBytes,
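The memmem hits above document the one-off find/rfind entry points and the Finder type that amortizes needle preprocessing when the same needle is reused across many haystacks. A short usage sketch against the public memchr::memmem API (the haystack and the expected offsets are worked out by hand for this example):

use memchr::memmem;

fn main() {
    let haystack = b"thread thimble thread";

    // One-off searches: O(needle.len() + haystack.len()).
    assert_eq!(memmem::find(haystack, b"thimble"), Some(7));
    assert_eq!(memmem::rfind(haystack, b"thread"), Some(15));

    // Reuse the same needle across many haystacks: build the Finder once.
    let finder = memmem::Finder::new("thread");
    assert_eq!(finder.needle(), b"thread");
    for h in [&b"no match here"[..], &b"a thread"[..], &b"threads"[..]] {
        println!("{:?} -> {:?}", h, finder.find(h));
    }
}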
android.googlesource.com/platform/external/libchrome:base/debug/activity_tracker.h: [ master, ]
119:   } thread;
118:     int64_t thread_id;  // A unique identifier for a thread within a process.
279:     ACT_THREAD = 4 << 4,
594:     std::string thread_name;
604:     int64_t thread_id = 0;
751:   const PlatformThreadRef thread_id_;  // The thread this instance is bound to.
1208:   std::atomic<int> thread_tracker_count_;
1211:   ActivityTrackerMemoryAllocator thread_tracker_allocator_;
1212:   Lock thread_tracker_allocator_lock_;
160:   static ActivityData ForThread(const int64_t id) {
574: class BASE_EXPORT ThreadActivityTracker {
957:   ThreadActivityTracker* GetTrackerForCurrentThread() {
964:   ThreadActivityTracker* GetOrCreateTrackerForCurrentThread() {
1084:   class ThreadSafeUserData : public ActivityUserData {
280:     ACT_THREAD_START = ACT_THREAD,
281:     ACT_THREAD_JOIN,
1205:   ThreadLocalStorage::Slot this_thread_tracker_;
840:   class BASE_EXPORT ScopedThreadActivity
1076:     kMaxThreadCount = 100,
1077:     kCachedThreadMemories = 10,
1335: class BASE_EXPORT ScopedThreadJoinActivity
1339:   explicit ScopedThreadJoinActivity(const PlatformThreadHandle* thread)
36: #include "base/threading/platform_thread.h"
37: #include "base/threading/thread_local_storage.h"
162:     data.thread.thread_id = id;
184: // persistent memory allocator. Instances of this class are NOT thread-safe.
185: // Use from a single thread or protect access with a lock.
263:     // Task activities involve callbacks posted to a thread or thread-pool
278:     // Thread activities involve the life management of threads.
309:   // from a completely different thread though most activities will leave
345: // done by a thread by supporting key/value pairs of any type. This can provide
347: // global data. All updates must be done from the same thread though other
411:   // contents have been overwritten by another thread. The return value is
537:   // gone away (cleared by another thread/process), it will invalidate all the
565: // This class manages tracking a stack of activities for a single thread in
568: // thread is analyzing this data in real-time, atomic operations are used
578:   // This structure contains all the common information about the thread so
585:   // so that continued operation of the thread will not cause changes here.
592:     // The name of the thread as set when it was created. The name may be
599:     // The process and thread IDs. These values have no meaning other than
600:     // they uniquely identify a running process and a running thread within
601:     // that process.  Thread-IDs can be re-used across different processes
602:     // and both can be re-used after the process/thread exits.
606:     // The current stack of activities that are underway for this thread. It
635:     // The thread tracker to which this object reports. It can be null if
748:   // The ActivityTracker is thread bound, and will be invoked across all the
749:   // sequences that run on the thread. A ThreadChecker does not work here, as it
761: // The global tracker manages all the individual thread trackers. Memory for
762: // the thread trackers is taken from a PersistentMemoryAllocator which allows
837:   // This is a thin wrapper around the thread-tracker's ScopedActivity that
838:   // allows thread-safe access to data values. It is safe to use even if
854:     // Gets (or creates) a tracker for the current thread. If locking is not
857:     // the tracker for this thread has been created for other reasons, locks
858:     // will be tracked. The thread-tracker uses locks.
865:       // if code that runs late during thread destruction tries to use a
885:   // providing the given |stack_depth| to each thread tracker it manages. The
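activity_tracker.h's comments describe a per-thread stack of activity records that the owning thread updates with atomic operations so an analyzer running in another thread or process can read it without locks. A conceptual Rust sketch of that pattern, not the Chromium API (ActivityStack, its fields and MAX_DEPTH are illustrative):

use std::sync::atomic::{AtomicU32, AtomicU64, Ordering};

const MAX_DEPTH: usize = 16;

#[derive(Default)]
struct ActivityStack {
    depth: AtomicU32,
    // One slot per stack level; here just an "activity id" per slot.
    slots: [AtomicU64; MAX_DEPTH],
}

impl ActivityStack {
    // Called on the owning thread when an activity starts. Levels beyond
    // MAX_DEPTH are counted but not recorded.
    fn push(&self, activity_id: u64) {
        let d = self.depth.load(Ordering::Relaxed) as usize;
        if d < MAX_DEPTH {
            self.slots[d].store(activity_id, Ordering::Relaxed);
        }
        // Publish the new depth only after the slot is written.
        self.depth.store(d as u32 + 1, Ordering::Release);
    }

    // Called on the owning thread when the activity ends.
    fn pop(&self) {
        let d = self.depth.load(Ordering::Relaxed);
        self.depth.store(d.saturating_sub(1), Ordering::Release);
    }

    // Called from an analyzer thread; the snapshot may mix in a concurrent
    // update, but it never reads past the published depth.
    fn snapshot(&self) -> Vec<u64> {
        let d = (self.depth.load(Ordering::Acquire) as usize).min(MAX_DEPTH);
        (0..d).map(|i| self.slots[i].load(Ordering::Relaxed)).collect()
    }
}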