1 /* Copyright (C) 1995,1996,1997,1998,2000,2001, 2002, 2003, 2004, 2005, 2006, 2008 Free Software Foundation, Inc.
3 * This library is free software; you can redistribute it and/or
4 * modify it under the terms of the GNU Lesser General Public
5 * License as published by the Free Software Foundation; either
6 * version 2.1 of the License, or (at your option) any later version.
8 * This library is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * Lesser General Public License for more details.
13 * You should have received a copy of the GNU Lesser General Public
14 * License along with this library; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
24 #include "libguile/_scm.h"
33 #include <string.h> /* for memset used by FD_ZERO on Solaris 10 */
40 #include "libguile/validate.h"
41 #include "libguile/root.h"
42 #include "libguile/eval.h"
43 #include "libguile/async.h"
44 #include "libguile/ports.h"
45 #include "libguile/threads.h"
46 #include "libguile/dynwind.h"
47 #include "libguile/iselect.h"
48 #include "libguile/fluids.h"
49 #include "libguile/continuations.h"
50 #include "libguile/gc.h"
51 #include "libguile/init.h"
55 # define ETIMEDOUT WSAETIMEDOUT
59 # define pipe(fd) _pipe (fd, 256, O_BINARY)
60 #endif /* __MINGW32__ */
/* Queue helpers.  A queue is a cons cell whose CDR holds the element
   list and whose CAR tracks the list's last pair, so enqueue at the
   tail and dequeue at the head are both O(1).
   NOTE(review): this listing is sampled -- interior lines of each
   definition are elided, so the fragments below are incomplete.  */
64 /* Make an empty queue data structure.
/* An empty queue has both the tail pointer and the element list '().  */
69 return scm_cons (SCM_EOL, SCM_EOL);
72 /* Put T at the back of Q and return a handle that can be used with
73 remqueue to remove T from Q again.
76 enqueue (SCM q, SCM t)
/* C is the new last pair; it doubles as the remqueue handle.  */
78 SCM c = scm_cons (t, SCM_EOL);
79 if (scm_is_null (SCM_CDR (q)))
/* Non-empty queue: splice C in after the current tail pair.  */
82 SCM_SETCDR (SCM_CAR (q), c);
87 /* Remove the element that the handle C refers to from the queue Q. C
88 must have been returned from a call to enqueue. The return value
89 is zero when the element referred to by C has already been removed.
90 Otherwise, 1 is returned.
93 remqueue (SCM q, SCM c)
/* Scan for the pair C itself (pointer identity, not element equality).  */
96 for (p = SCM_CDR (q); !scm_is_null (p); p = SCM_CDR (p))
/* When C is the tail pair, update the tail pointer before unlinking.  */
100 if (scm_is_eq (c, SCM_CAR (q)))
101 SCM_SETCAR (q, SCM_CDR (c));
102 SCM_SETCDR (prev, SCM_CDR (c));
110 /* Remove the front-most element from the queue Q and return it.
111 Return SCM_BOOL_F when Q is empty.
/* Pop the head pair; reset the tail pointer when the queue drains.  */
121 SCM_SETCDR (q, SCM_CDR (c));
122 if (scm_is_null (SCM_CDR (q)))
123 SCM_SETCAR (q, SCM_EOL);
128 /*** Thread smob routines */
/* GC mark hook for the thread smob: mark every live SCM field of the
   scm_i_thread, returning dynamic_state so the caller marks it
   tail-recursively.  */
131 thread_mark (SCM obj)
133 scm_i_thread *t = SCM_I_THREAD_DATA (obj);
134 scm_gc_mark (t->result);
135 scm_gc_mark (t->join_queue);
136 scm_gc_mark (t->dynwinds);
137 scm_gc_mark (t->active_asyncs);
138 scm_gc_mark (t->continuation_root);
139 return t->dynamic_state;
/* Print hook: writes "#<thread ID (ADDR)>" where ID is the pthread id
   (extracted size-portably, see comment below) and ADDR the struct
   address in hex.  */
143 thread_print (SCM exp, SCM port, scm_print_state *pstate SCM_UNUSED)
145 /* On a Gnu system pthread_t is an unsigned long, but on mingw it's a
146 struct. A cast like "(unsigned long) t->pthread" is a syntax error in
147 the struct case, hence we go via a union, and extract according to the
148 size of pthread_t. */
156 scm_i_thread *t = SCM_I_THREAD_DATA (exp);
157 scm_i_pthread_t p = t->pthread;
160 if (sizeof (p) == sizeof (unsigned short))
162 else if (sizeof (p) == sizeof (unsigned int))
164 else if (sizeof (p) == sizeof (unsigned long))
169 scm_puts ("#<thread ", port);
170 scm_uintprint (id, 10, port);
171 scm_puts (" (", port);
172 scm_uintprint ((scm_t_bits)t, 16, port);
173 scm_puts (")>", port);
/* Free hook: releases the thread structure itself.  */
178 thread_free (SCM obj)
180 scm_i_thread *t = SCM_I_THREAD_DATA (obj);
182 scm_gc_free (t, sizeof (*t), "thread");
186 /*** Blocking on queues. */
188 /* See also scm_i_queue_async_cell for how such a block is
192 /* Put the current thread on QUEUE and go to sleep, waiting for it to
193 be woken up by a call to 'unblock_from_queue', or to be
194 interrupted. Upon return of this function, the current thread is
195 no longer on QUEUE, even when the sleep has been interrupted.
197 The QUEUE data structure is assumed to be protected by MUTEX and
198 the caller of block_self must hold MUTEX. It will be atomically
199 unlocked while sleeping, just as with scm_i_pthread_cond_wait.
201 SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
204 When WAITTIME is not NULL, the sleep will be aborted at that time.
206 The return value of block_self is an errno value. It will be zero
207 when the sleep has been successfully completed by a call to
208 unblock_from_queue, EINTR when it has been interrupted by the
209 delivery of a system async, and ETIMEDOUT when the timeout has
212 The system asyncs themselves are not executed by block_self.
215 block_self (SCM queue, SCM sleep_object, scm_i_pthread_mutex_t *mutex,
216 const scm_t_timespec *waittime)
218 scm_i_thread *t = SCM_I_CURRENT_THREAD;
/* scm_i_setup_sleep returns nonzero when an async is already pending,
   in which case we never actually sleep -- the elided branch
   presumably reports EINTR; TODO confirm against the full source.  */
222 if (scm_i_setup_sleep (t, sleep_object, mutex, -1))
227 q_handle = enqueue (queue, t->handle);
228 if (waittime == NULL)
229 err = scm_i_scm_pthread_cond_wait (&t->sleep_cond, mutex);
231 err = scm_i_scm_pthread_cond_timedwait (&t->sleep_cond, mutex, waittime);
233 /* When we are still on QUEUE, we have been interrupted. We
234 report this only when no other error (such as a timeout) has
237 if (remqueue (queue, q_handle) && err == 0)
240 scm_i_reset_sleep (t);
246 /* Wake up the first thread on QUEUE, if any. The caller must hold
247 the mutex that protects QUEUE. The awoken thread is returned, or
248 #f when the queue was empty.
251 unblock_from_queue (SCM queue)
253 SCM thread = dequeue (queue);
/* dequeue yields SCM_BOOL_F for an empty queue; only signal a real
   thread's sleep condition variable.  */
254 if (scm_is_true (thread))
255 scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread)->sleep_cond);
259 /* Getting into and out of guile mode.
262 /* Ken Raeburn observes that the implementation of suspend and resume
263 (and the things that build on top of them) are very likely not
264 correct (see below). We will need to fix this eventually, and that's
265 why scm_leave_guile/scm_enter_guile are not exported in the API.
269 Consider this sequence:
271 Function foo, called in Guile mode, calls suspend (maybe indirectly
272 through scm_leave_guile), which does this:
274 // record top of stack for the GC
275 t->top = SCM_STACK_PTR (&t); // just takes address of automatic
278 SCM_FLUSH_REGISTER_WINDOWS; // sparc only
279 SCM_I_SETJMP (t->regs); // here's most of the magic
283 Function foo has a SCM value X, a handle on a non-immediate object, in
284 a caller-saved register R, and it's the only reference to the object
287 The compiler wants to use R in suspend, so it pushes the current
288 value, X, into a stack slot which will be reloaded on exit from
289 suspend; then it loads stuff into R and goes about its business. The
290 setjmp call saves (some of) the current registers, including R, which
291 no longer contains X. (This isn't a problem for a normal
292 setjmp/longjmp situation, where longjmp would be called before
293 setjmp's caller returns; the old value for X would be loaded back from
294 the stack after the longjmp, before the function returned.)
296 So, suspend returns, loading X back into R (and invalidating the jump
297 buffer) in the process. The caller foo then goes off and calls a
298 bunch of other functions out of Guile mode, occasionally storing X on
299 the stack again, but, say, much deeper on the stack than suspend's
300 stack frame went, and the stack slot where suspend had written X has
301 long since been overwritten with other values.
303 Okay, nothing actively broken so far. Now, let garbage collection
304 run, triggered by another thread.
306 The thread calling foo is out of Guile mode at the time, so the
307 garbage collector just scans a range of stack addresses. Too bad that
308 X isn't stored there. So the pointed-to storage goes onto the free
309 list, and I think you can see where things go from there.
311 Is there anything I'm missing that'll prevent this scenario from
312 happening? I mean, aside from, "well, suspend and scm_leave_guile
313 don't have many local variables, so they probably won't need to save
314 any registers on most systems, so we hope everything will wind up in
315 the jump buffer and we'll just get away with it"?
317 (And, going the other direction, if scm_leave_guile and suspend push
318 the stack pointer over onto a new page, and foo doesn't make further
319 function calls and thus the stack pointer no longer includes that
320 page, are we guaranteed that the kernel cannot release the now-unused
321 stack page that contains the top-of-stack pointer we just saved? I
322 don't know if any OS actually does that. If it does, we could get
323 faults in garbage collection.)
325 I don't think scm_without_guile has to have this problem, as it gets
326 more control over the stack handling -- but it should call setjmp
327 itself. I'd probably try something like:
329 // record top of stack for the GC
330 t->top = SCM_STACK_PTR (&t);
332 SCM_FLUSH_REGISTER_WINDOWS;
333 SCM_I_SETJMP (t->regs);
337 ... though even that's making some assumptions about the stack
338 ordering of local variables versus caller-saved registers.
340 For something like scm_leave_guile to work, I don't think it can just
341 rely on invalidated jump buffers. A valid jump buffer, and a handle
342 on the stack state at the point when the jump buffer was initialized,
343 together, would work fine, but I think then we're talking about macros
344 invoking setjmp in the caller's stack frame, and requiring that the
345 caller of scm_leave_guile also call scm_enter_guile before returning,
346 kind of like pthread_cleanup_push/pop calls that have to be paired up
347 in a function. (In fact, the pthread ones have to be paired up
348 syntactically, as if they might expand to a compound statement
349 incorporating the user's code, and invoking a compiler's
350 exception-handling primitives. Which might be something to think
351 about for cases where Guile is used with C++ exceptions or
355 scm_i_pthread_key_t scm_i_thread_key;
358 resume (scm_i_thread *t)
361 if (t->clear_freelists_p)
363 *SCM_FREELIST_LOC (scm_i_freelist) = SCM_EOL;
364 *SCM_FREELIST_LOC (scm_i_freelist2) = SCM_EOL;
365 t->clear_freelists_p = 0;
369 typedef void* scm_t_guile_ticket;
372 scm_enter_guile (scm_t_guile_ticket ticket)
374 scm_i_thread *t = (scm_i_thread *)ticket;
377 scm_i_pthread_mutex_lock (&t->heap_mutex);
382 static scm_i_thread *
385 scm_i_thread *t = SCM_I_CURRENT_THREAD;
387 /* record top of stack for the GC */
388 t->top = SCM_STACK_PTR (&t);
389 /* save registers. */
390 SCM_FLUSH_REGISTER_WINDOWS;
391 SCM_I_SETJMP (t->regs);
395 static scm_t_guile_ticket
398 scm_i_thread *t = suspend ();
399 scm_i_pthread_mutex_unlock (&t->heap_mutex);
400 return (scm_t_guile_ticket) t;
403 static scm_i_pthread_mutex_t thread_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
404 static scm_i_thread *all_threads = NULL;
405 static int thread_count;
407 static SCM scm_i_default_dynamic_state;
409 /* Perform first stage of thread initialisation, in non-guile mode.
/* Allocates and wires up the scm_i_thread for the calling OS thread:
   plain malloc (no GC yet for this thread), default field values, the
   sleep pipe used to interrupt blocking selects, per-thread freelists,
   and registration on the global all_threads list.  */
412 guilify_self_1 (SCM_STACKITEM *base)
414 scm_i_thread *t = malloc (sizeof (scm_i_thread));
416 t->pthread = scm_i_pthread_self ();
417 t->handle = SCM_BOOL_F;
418 t->result = SCM_BOOL_F;
419 t->join_queue = SCM_EOL;
420 t->dynamic_state = SCM_BOOL_F;
421 t->dynwinds = SCM_EOL;
422 t->active_asyncs = SCM_EOL;
424 t->pending_asyncs = 1;
425 t->critical_section_level = 0;
426 t->last_debug_frame = NULL;
429 /* Calculate and store off the base of this thread's register
430 backing store (RBS). Unfortunately our implementation(s) of
431 scm_ia64_register_backing_store_base are only reliable for the
432 main thread. For other threads, therefore, find out the current
433 top of the RBS, and use that as a maximum. */
434 t->register_backing_store_base = scm_ia64_register_backing_store_base ();
439 bsp = scm_ia64_ar_bsp (&ctx);
440 if (t->register_backing_store_base > bsp)
441 t->register_backing_store_base = bsp;
444 t->continuation_root = SCM_EOL;
445 t->continuation_base = base;
446 scm_i_pthread_cond_init (&t->sleep_cond, NULL);
447 t->sleep_mutex = NULL;
448 t->sleep_object = SCM_BOOL_F;
451 if (pipe (t->sleep_pipe) != 0)
452 /* FIXME: Error conditions during the initialization phase are handled
453 gracelessly since public functions such as `scm_init_guile ()'
454 currently have type `void'. */
457 scm_i_pthread_mutex_init (&t->heap_mutex, NULL);
458 t->clear_freelists_p = 0;
462 t->freelist = SCM_EOL;
463 t->freelist2 = SCM_EOL;
464 SCM_SET_FREELIST_LOC (scm_i_freelist, &t->freelist);
465 SCM_SET_FREELIST_LOC (scm_i_freelist2, &t->freelist2);
467 scm_i_pthread_setspecific (scm_i_thread_key, t);
469 /* As soon as this thread adds itself to the global thread list, the
470 GC may think that it has a stack that needs marking. Therefore
471 initialize t->top to be the same as t->base, just in case GC runs
472 before the thread can lock its heap_mutex for the first time. */
474 scm_i_pthread_mutex_lock (&thread_admin_mutex);
475 t->next_thread = all_threads;
478 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
480 /* Enter Guile mode. */
484 /* Perform second stage of thread initialisation, in guile mode.
/* Now that heap allocation works for this thread: create its smob
   handle, account its memory with the GC, and give it a dynamic state
   (child of PARENT when one is supplied, otherwise a fresh initial
   state) plus an empty join queue.  */
487 guilify_self_2 (SCM parent)
489 scm_i_thread *t = SCM_I_CURRENT_THREAD;
491 SCM_NEWSMOB (t->handle, scm_tc16_thread, t);
492 scm_gc_register_collectable_memory (t, sizeof (scm_i_thread), "thread");
493 t->continuation_root = scm_cons (t->handle, SCM_EOL);
494 t->continuation_base = t->base;
496 if (scm_is_true (parent))
497 t->dynamic_state = scm_make_dynamic_state (parent);
499 t->dynamic_state = scm_i_make_initial_dynamic_state ();
501 t->join_queue = make_queue ();
505 /* Perform thread tear-down, in guile mode.
/* Close the sleep pipe and wake every thread blocked in join-thread on
   us; runs under thread_admin_mutex because join_queue is protected by
   it (see scm_join_thread).  */
508 do_thread_exit (void *v)
510 scm_i_thread *t = (scm_i_thread *)v;
512 scm_i_scm_pthread_mutex_lock (&thread_admin_mutex);
515 close (t->sleep_pipe[0]);
516 close (t->sleep_pipe[1]);
517 while (scm_is_true (unblock_from_queue (t->join_queue)))
520 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
/* pthread-key destructor: called when a guilified thread terminates.  */
525 on_thread_exit (void *v)
527 /* This handler is executed in non-guile mode. */
528 scm_i_thread *t = (scm_i_thread *)v, **tp;
/* Re-install the key: the destructor runs after the key value has been
   cleared, but scm_with_guile below needs SCM_I_CURRENT_THREAD.  */
530 scm_i_pthread_setspecific (scm_i_thread_key, v);
532 /* Unblocking the joining threads needs to happen in guile mode
533 since the queue is a SCM data structure. */
534 scm_with_guile (do_thread_exit, v);
536 /* Removing ourself from the list of all threads needs to happen in
537 non-guile mode since all SCM values on our stack become
538 unprotected once we are no longer in the list. */
539 scm_i_pthread_mutex_lock (&thread_admin_mutex);
540 for (tp = &all_threads; *tp; tp = &(*tp)->next_thread)
543 *tp = t->next_thread;
547 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
549 scm_i_pthread_setspecific (scm_i_thread_key, NULL);
/* One-time initialisation of the thread-local key, with on_thread_exit
   as its destructor.  */
552 static scm_i_pthread_once_t init_thread_key_once = SCM_I_PTHREAD_ONCE_INIT;
555 init_thread_key (void)
557 scm_i_pthread_key_create (&scm_i_thread_key, on_thread_exit);
560 /* Perform any initializations necessary to bring the current thread
561 into guile mode, initializing Guile itself, if necessary.
563 BASE is the stack base to use with GC.
565 PARENT is the dynamic state to use as the parent, or SCM_BOOL_F in
566 which case the default dynamic state is used.
568 Return zero when the thread was in guile mode already; otherwise
/* Three cases are distinguished below: (1) thread never guilified --
   possibly the very first thread to touch Guile at all, in which case
   the global init runs under scm_i_init_mutex; (2) guilified but
   currently outside guile mode; (3) already in guile mode.  */
573 scm_i_init_thread_for_guile (SCM_STACKITEM *base, SCM parent)
577 scm_i_pthread_once (&init_thread_key_once, init_thread_key);
579 if ((t = SCM_I_CURRENT_THREAD) == NULL)
581 /* This thread has not been guilified yet.
584 scm_i_pthread_mutex_lock (&scm_i_init_mutex);
585 if (scm_initialized_p == 0)
587 /* First thread ever to enter Guile. Run the full
590 scm_i_init_guile (base);
591 scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
595 /* Guile is already initialized, but this thread enters it for
596 the first time. Only initialize this thread.
598 scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
599 guilify_self_1 (base);
600 guilify_self_2 (parent);
606 /* This thread is already guilified but not in guile mode, just
609 A user call to scm_with_guile() will lead us to here. This could
610 happen from anywhere on the stack, and in particular lower on the
611 stack than when it was when this thread was first guilified. Thus,
612 `base' must be updated. */
613 #if SCM_STACK_GROWS_UP
621 scm_enter_guile ((scm_t_guile_ticket) t);
626 /* Thread is already in guile mode. Nothing to do.
/* Platform-specific ways to discover the current thread's stack base,
   needed by scm_init_guile when the caller cannot supply one.  Each
   variant defines HAVE_GET_THREAD_STACK_BASE on success.  */
632 #if SCM_USE_PTHREAD_THREADS
634 #if HAVE_PTHREAD_ATTR_GETSTACK && HAVE_PTHREAD_GETATTR_NP
635 /* This method for GNU/Linux and perhaps some other systems.
636 It's not for MacOS X or Solaris 10, since pthread_getattr_np is not
637 available on them. */
638 #define HAVE_GET_THREAD_STACK_BASE
640 static SCM_STACKITEM *
641 get_thread_stack_base ()
647 pthread_getattr_np (pthread_self (), &attr);
648 pthread_attr_getstack (&attr, &start, &size);
649 end = (char *)start + size;
651 /* XXX - pthread_getattr_np from LinuxThreads does not seem to work
652 for the main thread, but we can use scm_get_stack_base in that
/* Sanity check: if our own automatic variable is not inside the
   reported [start, end) range, the reported stack is bogus; fall back
   to scm_get_stack_base.  */
656 #ifndef PTHREAD_ATTR_GETSTACK_WORKS
657 if ((void *)&attr < start || (void *)&attr >= end)
658 return scm_get_stack_base ();
662 #if SCM_STACK_GROWS_UP
670 #elif HAVE_PTHREAD_GET_STACKADDR_NP
671 /* This method for MacOS X.
672 It'd be nice if there was some documentation on pthread_get_stackaddr_np,
673 but as of 2006 there's nothing obvious at apple.com. */
674 #define HAVE_GET_THREAD_STACK_BASE
675 static SCM_STACKITEM *
676 get_thread_stack_base ()
678 return pthread_get_stackaddr_np (pthread_self ());
681 #elif defined (__MINGW32__)
682 /* This method for mingw. In mingw the basic scm_get_stack_base can be used
683 in any thread. We don't like hard-coding the name of a system, but there
684 doesn't seem to be a cleaner way of knowing scm_get_stack_base can
686 #define HAVE_GET_THREAD_STACK_BASE
687 static SCM_STACKITEM *
688 get_thread_stack_base ()
690 return scm_get_stack_base ();
693 #endif /* pthread methods of get_thread_stack_base */
695 #else /* !SCM_USE_PTHREAD_THREADS */
/* Without pthreads there is only one thread, so the generic
   scm_get_stack_base suffices.  */
697 #define HAVE_GET_THREAD_STACK_BASE
699 static SCM_STACKITEM *
700 get_thread_stack_base ()
702 return scm_get_stack_base ();
705 #endif /* !SCM_USE_PTHREAD_THREADS */
/* scm_init_guile (fragment): bring the calling thread into guile mode
   using the discovered stack base and the default dynamic state.  */
707 #ifdef HAVE_GET_THREAD_STACK_BASE
712 scm_i_init_thread_for_guile (get_thread_stack_base (),
713 scm_i_default_dynamic_state);
/* Run FUNC (DATA) in guile mode under a continuation barrier, entering
   guile mode first if necessary; convenience wrapper using the default
   dynamic state as parent.  */
719 scm_with_guile (void *(*func)(void *), void *data)
721 return scm_i_with_guile_and_parent (func, data,
722 scm_i_default_dynamic_state);
/* As above but with an explicit PARENT dynamic state.  base_item marks
   the current stack position as the (possibly updated) GC stack base.  */
726 scm_i_with_guile_and_parent (void *(*func)(void *), void *data,
731 SCM_STACKITEM base_item;
732 really_entered = scm_i_init_thread_for_guile (&base_item, parent);
733 res = scm_c_with_continuation_barrier (func, data);
/* Run FUNC (DATA) outside guile mode, redeeming the ticket afterwards
   (elided lines presumably call func and scm_enter_guile -- TODO
   confirm against the full source).  */
740 scm_without_guile (void *(*func)(void *), void *data)
743 scm_t_guile_ticket t;
744 t = scm_leave_guile ();
750 /*** Thread creation */
/* launch_data (fragment): handshake structure shared between the
   creating thread and the new thread; mutex/cond guard the "thread
   handle is ready" rendezvous.  */
757 scm_i_pthread_mutex_t mutex;
758 scm_i_pthread_cond_t cond;
/* Body of the new thread, run in guile mode: publish our thread handle
   to the creator, signal it, then run the thunk (optionally under a
   catch-all handler) and stash the result for join-thread.  */
762 really_launch (void *d)
764 launch_data *data = (launch_data *)d;
765 SCM thunk = data->thunk, handler = data->handler;
768 t = SCM_I_CURRENT_THREAD;
770 scm_i_scm_pthread_mutex_lock (&data->mutex);
771 data->thread = scm_current_thread ();
772 scm_i_pthread_cond_signal (&data->cond);
/* After this unlock DATA may vanish: the creator's stack frame owns it
   and continues once signalled.  */
773 scm_i_pthread_mutex_unlock (&data->mutex);
775 if (SCM_UNBNDP (handler))
776 t->result = scm_call_0 (thunk);
778 t->result = scm_catch (SCM_BOOL_T, thunk, handler);
/* Raw pthread entry point: detach (nobody pthread_joins Guile threads;
   join-thread uses the join_queue instead), then enter guile mode.  */
784 launch_thread (void *d)
786 launch_data *data = (launch_data *)d;
787 scm_i_pthread_detach (scm_i_pthread_self ());
788 scm_i_with_guile_and_parent (really_launch, d, data->parent);
792 SCM_DEFINE (scm_call_with_new_thread, "call-with-new-thread", 1, 1, 0,
793 (SCM thunk, SCM handler),
794 "Call @code{thunk} in a new thread and with a new dynamic state,\n"
795 "returning a new thread object representing the thread. The procedure\n"
796 "@var{thunk} is called via @code{with-continuation-barrier}.\n"
798 "When @var{handler} is specified, then @var{thunk} is called from\n"
799 "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
800 "handler. This catch is established inside the continuation barrier.\n"
802 "Once @var{thunk} or @var{handler} returns, the return value is made\n"
803 "the @emph{exit value} of the thread and the thread is terminated.")
804 #define FUNC_NAME s_scm_call_with_new_thread
810 SCM_ASSERT (scm_is_true (scm_thunk_p (thunk)), thunk, SCM_ARG1, FUNC_NAME);
811 SCM_ASSERT (SCM_UNBNDP (handler) || scm_is_true (scm_procedure_p (handler)),
812 handler, SCM_ARG2, FUNC_NAME);
814 data.parent = scm_current_dynamic_state ();
816 data.handler = handler;
817 data.thread = SCM_BOOL_F;
818 scm_i_pthread_mutex_init (&data.mutex, NULL);
819 scm_i_pthread_cond_init (&data.cond, NULL);
/* Hold the mutex across pthread_create so the child cannot signal the
   cond before we are waiting on it.  */
821 scm_i_scm_pthread_mutex_lock (&data.mutex);
822 err = scm_i_pthread_create (&id, NULL, launch_thread, &data);
825 scm_i_pthread_mutex_unlock (&data.mutex);
/* Wait for the child to publish data.thread before returning it.  */
829 scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
830 scm_i_pthread_mutex_unlock (&data.mutex);
/* spawn_data (fragment): C-level analogue of launch_data for
   scm_spawn_thread -- body/handler are C callbacks instead of Scheme
   procedures; mutex/cond implement the same handshake.  */
838 scm_t_catch_body body;
840 scm_t_catch_handler handler;
843 scm_i_pthread_mutex_t mutex;
844 scm_i_pthread_cond_t cond;
/* New-thread body, in guile mode: publish the thread handle, signal
   the creator, then run BODY (under scm_internal_catch when a handler
   is supplied) and store the result for join-thread.  */
848 really_spawn (void *d)
850 spawn_data *data = (spawn_data *)d;
851 scm_t_catch_body body = data->body;
852 void *body_data = data->body_data;
853 scm_t_catch_handler handler = data->handler;
854 void *handler_data = data->handler_data;
855 scm_i_thread *t = SCM_I_CURRENT_THREAD;
857 scm_i_scm_pthread_mutex_lock (&data->mutex);
858 data->thread = scm_current_thread ();
859 scm_i_pthread_cond_signal (&data->cond);
/* DATA lives on the creator's stack; do not touch it after unlock.  */
860 scm_i_pthread_mutex_unlock (&data->mutex);
863 t->result = body (body_data);
865 t->result = scm_internal_catch (SCM_BOOL_T,
867 handler, handler_data);
/* Raw pthread entry point, mirroring launch_thread.  */
873 spawn_thread (void *d)
875 spawn_data *data = (spawn_data *)d;
876 scm_i_pthread_detach (scm_i_pthread_self ());
877 scm_i_with_guile_and_parent (really_spawn, d, data->parent);
/* Public C API: start a new Guile thread running BODY (BODY_DATA),
   with HANDLER as catch-all; returns the new thread object.  */
882 scm_spawn_thread (scm_t_catch_body body, void *body_data,
883 scm_t_catch_handler handler, void *handler_data)
889 data.parent = scm_current_dynamic_state ();
891 data.body_data = body_data;
892 data.handler = handler;
893 data.handler_data = handler_data;
894 data.thread = SCM_BOOL_F;
895 scm_i_pthread_mutex_init (&data.mutex, NULL);
896 scm_i_pthread_cond_init (&data.cond, NULL);
/* Same rendezvous as call-with-new-thread: hold the mutex across
   create, then wait until the child has published its handle.  */
898 scm_i_scm_pthread_mutex_lock (&data.mutex);
899 err = scm_i_pthread_create (&id, NULL, spawn_thread, &data);
902 scm_i_pthread_mutex_unlock (&data.mutex);
906 scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
907 scm_i_pthread_mutex_unlock (&data.mutex);
912 SCM_DEFINE (scm_yield, "yield", 0, 0, 0,
914 "Move the calling thread to the end of the scheduling queue.")
915 #define FUNC_NAME s_scm_yield
917 return scm_from_bool (scm_i_sched_yield ());
921 SCM_DEFINE (scm_join_thread, "join-thread", 1, 0, 0,
923 "Suspend execution of the calling thread until the target @var{thread} "
924 "terminates, unless the target @var{thread} has already terminated. ")
925 #define FUNC_NAME s_scm_join_thread
/* Joining self would deadlock, so it is rejected up front.  */
930 SCM_VALIDATE_THREAD (1, thread);
931 if (scm_is_eq (scm_current_thread (), thread))
932 SCM_MISC_ERROR ("can not join the current thread", SCM_EOL);
/* The target's exit status and join_queue are protected by
   thread_admin_mutex; block on the join queue until do_thread_exit
   wakes us.  */
934 scm_i_scm_pthread_mutex_lock (&thread_admin_mutex);
936 t = SCM_I_THREAD_DATA (thread);
939 block_self (t->join_queue, thread, &thread_admin_mutex, NULL);
942 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
944 scm_i_scm_pthread_mutex_lock (&thread_admin_mutex);
948 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
955 /* We implement our own mutex type since we want them to be 'fair', we
956 want to do fancy things while waiting for them (like running
957 asyncs) and we might want to add things that are nice for
/* fat_mutex (fragment): lock protects the fields below; owner is the
   owning thread object or #f; level counts recursive acquisitions
   (negative marks a non-recursive mutex); waiting is a thread queue.  */
962 scm_i_pthread_mutex_t lock;
964 int level; /* how much the owner owns us.
965 < 0 for non-recursive mutexes */
966 SCM waiting; /* the threads waiting for this mutex. */
969 #define SCM_MUTEXP(x) SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
970 #define SCM_MUTEX_DATA(x) ((fat_mutex *) SCM_SMOB_DATA (x))
/* GC mark hook: owner is marked here; the waiting queue is presumably
   marked via the elided lines -- TODO confirm against the full source.  */
973 fat_mutex_mark (SCM mx)
975 fat_mutex *m = SCM_MUTEX_DATA (mx);
976 scm_gc_mark (m->owner);
/* Free hook: destroy the OS lock, then release the struct.  */
981 fat_mutex_free (SCM mx)
983 fat_mutex *m = SCM_MUTEX_DATA (mx);
984 scm_i_pthread_mutex_destroy (&m->lock);
985 scm_gc_free (m, sizeof (fat_mutex), "mutex");
/* Print hook: "#<mutex ADDR>".  */
990 fat_mutex_print (SCM mx, SCM port, scm_print_state *pstate SCM_UNUSED)
992 fat_mutex *m = SCM_MUTEX_DATA (mx);
993 scm_puts ("#<mutex ", port);
994 scm_uintprint ((scm_t_bits)m, 16, port);
995 scm_puts (">", port);
/* Allocate a fat mutex; waiting is set to '() first so the smob is
   GC-safe before make_queue (which can allocate) runs.  */
1000 make_fat_mutex (int recursive)
1005 m = scm_gc_malloc (sizeof (fat_mutex), "mutex");
1006 scm_i_pthread_mutex_init (&m->lock, NULL);
1007 m->owner = SCM_BOOL_F;
1008 m->level = recursive? 0 : -1;
1009 m->waiting = SCM_EOL;
1010 SCM_NEWSMOB (mx, scm_tc16_mutex, (scm_t_bits) m);
1011 m->waiting = make_queue ();
1015 SCM_DEFINE (scm_make_mutex, "make-mutex", 0, 0, 0,
1017 "Create a new mutex. ")
1018 #define FUNC_NAME s_scm_make_mutex
1020 return make_fat_mutex (0);
1024 SCM_DEFINE (scm_make_recursive_mutex, "make-recursive-mutex", 0, 0, 0,
1026 "Create a new recursive mutex. ")
1027 #define FUNC_NAME s_scm_make_recursive_mutex
1029 return make_fat_mutex (1);
/* Acquire MUTEX for the current thread, blocking (and running asyncs
   via block_self's machinery) until available.  Returns NULL on
   success or an error-message string for the caller to raise.  */
1034 fat_mutex_lock (SCM mutex)
1036 fat_mutex *m = SCM_MUTEX_DATA (mutex);
1037 SCM thread = scm_current_thread ();
1040 scm_i_scm_pthread_mutex_lock (&m->lock);
1041 if (scm_is_false (m->owner))
/* Re-entry by the owner: allowed only for recursive mutexes (the
   elided branch presumably bumps m->level when level >= 0).  */
1043 else if (scm_is_eq (m->owner, thread))
1048 msg = "mutex already locked by current thread";
/* Contended path: sleep on the waiting queue until unlock hands the
   mutex over; re-check ownership after each wakeup.  */
1054 if (scm_is_eq (m->owner, thread))
1056 block_self (m->waiting, mutex, &m->lock, NULL);
1057 scm_i_pthread_mutex_unlock (&m->lock);
1059 scm_i_scm_pthread_mutex_lock (&m->lock);
1062 scm_i_pthread_mutex_unlock (&m->lock);
1066 SCM_DEFINE (scm_lock_mutex, "lock-mutex", 1, 0, 0,
1068 "Lock @var{mutex}. If the mutex is already locked, the calling thread "
1069 "blocks until the mutex becomes available. The function returns when "
1070 "the calling thread owns the lock on @var{mutex}. Locking a mutex that "
1071 "a thread already owns will succeed right away and will not block the "
1072 "thread. That is, Guile's mutexes are @emph{recursive}. ")
1073 #define FUNC_NAME s_scm_lock_mutex
1077 SCM_VALIDATE_MUTEX (1, mx);
1078 msg = fat_mutex_lock (mx);
1080 scm_misc_error (NULL, msg, SCM_EOL);
/* Dynwind integration: (re)lock on rewind, unlock on unwind, so the
   mutex is released even on non-local exit.  */
1086 scm_dynwind_lock_mutex (SCM mutex)
1088 scm_dynwind_unwind_handler_with_scm ((void(*)(SCM))scm_unlock_mutex, mutex,
1089 SCM_F_WIND_EXPLICITLY);
1090 scm_dynwind_rewind_handler_with_scm ((void(*)(SCM))scm_lock_mutex, mutex,
1091 SCM_F_WIND_EXPLICITLY);
/* Non-blocking acquire: *RESP reports success; returns NULL or an
   error-message string, mirroring fat_mutex_lock's contract.  */
1095 fat_mutex_trylock (fat_mutex *m, int *resp)
1098 SCM thread = scm_current_thread ();
1101 scm_i_pthread_mutex_lock (&m->lock);
1102 if (scm_is_false (m->owner))
1104 else if (scm_is_eq (m->owner, thread))
1109 msg = "mutex already locked by current thread";
1113 scm_i_pthread_mutex_unlock (&m->lock);
1117 SCM_DEFINE (scm_try_mutex, "try-mutex", 1, 0, 0,
1119 "Try to lock @var{mutex}. If the mutex is already locked by someone "
1120 "else, return @code{#f}. Else lock the mutex and return @code{#t}. ")
1121 #define FUNC_NAME s_scm_try_mutex
1126 SCM_VALIDATE_MUTEX (1, mutex);
1128 msg = fat_mutex_trylock (SCM_MUTEX_DATA (mutex), &res);
1130 scm_misc_error (NULL, msg, SCM_EOL);
1131 return scm_from_bool (res);
/* Release M, held by the current thread.  On a recursive mutex with
   level > 0 only the count drops; otherwise ownership is handed
   directly to the first waiter (unblock_from_queue returns #f when
   none, leaving the mutex free).  Returns NULL or an error string.  */
1136 fat_mutex_unlock (fat_mutex *m)
1140 scm_i_scm_pthread_mutex_lock (&m->lock);
1141 if (!scm_is_eq (m->owner, scm_current_thread ()))
1143 if (scm_is_false (m->owner))
1144 msg = "mutex not locked";
1146 msg = "mutex not locked by current thread";
1148 else if (m->level > 0)
/* Hand-off makes the mutex 'fair': the longest waiter becomes owner.  */
1151 m->owner = unblock_from_queue (m->waiting);
1152 scm_i_pthread_mutex_unlock (&m->lock);
1157 SCM_DEFINE (scm_unlock_mutex, "unlock-mutex", 1, 0, 0,
1159 "Unlocks @var{mutex} if the calling thread owns the lock on "
1160 "@var{mutex}. Calling unlock-mutex on a mutex not owned by the current "
1161 "thread results in undefined behaviour. Once a mutex has been unlocked, "
1162 "one thread blocked on @var{mutex} is awakened and grabs the mutex "
1163 "lock. Every call to @code{lock-mutex} by this thread must be matched "
1164 "with a call to @code{unlock-mutex}. Only the last call to "
1165 "@code{unlock-mutex} will actually unlock the mutex. ")
1166 #define FUNC_NAME s_scm_unlock_mutex
1169 SCM_VALIDATE_MUTEX (1, mx);
1171 msg = fat_mutex_unlock (SCM_MUTEX_DATA (mx));
1173 scm_misc_error (NULL, msg, SCM_EOL);
1180 SCM_DEFINE (scm_mutex_owner, "mutex-owner", 1, 0, 0,
1182 "Return the thread owning @var{mx}, or @code{#f}.")
1183 #define FUNC_NAME s_scm_mutex_owner
1185 SCM_VALIDATE_MUTEX (1, mx);
1186 return (SCM_MUTEX_DATA(mx))->owner;
1190 SCM_DEFINE (scm_mutex_level, "mutex-level", 1, 0, 0,
1192 "Return the lock level of a recursive mutex, or -1\n"
1193 "for a standard mutex.")
1194 #define FUNC_NAME s_scm_mutex_level
1196 SCM_VALIDATE_MUTEX (1, mx);
1197 return scm_from_int (SCM_MUTEX_DATA(mx)->level);
1203 /*** Fat condition variables */
/* fat_cond (fragment): lock protects waiting, a queue of threads
   blocked on this condition variable.  */
1206 scm_i_pthread_mutex_t lock;
1207 SCM waiting; /* the threads waiting for this condition. */
1210 #define SCM_CONDVARP(x) SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
1211 #define SCM_CONDVAR_DATA(x) ((fat_cond *) SCM_SMOB_DATA (x))
/* GC mark hook (fragment): presumably returns c->waiting for
   tail-marking -- the body is elided here; TODO confirm.  */
1214 fat_cond_mark (SCM cv)
1216 fat_cond *c = SCM_CONDVAR_DATA (cv);
/* Free hook: destroy the OS lock, then release the struct.  */
1221 fat_cond_free (SCM mx)
1223 fat_cond *c = SCM_CONDVAR_DATA (mx);
1224 scm_i_pthread_mutex_destroy (&c->lock);
1225 scm_gc_free (c, sizeof (fat_cond), "condition-variable");
/* Print hook: "#<condition-variable ADDR>".  */
1230 fat_cond_print (SCM cv, SCM port, scm_print_state *pstate SCM_UNUSED)
1232 fat_cond *c = SCM_CONDVAR_DATA (cv);
1233 scm_puts ("#<condition-variable ", port);
1234 scm_uintprint ((scm_t_bits)c, 16, port);
1235 scm_puts (">", port);
1239 SCM_DEFINE (scm_make_condition_variable, "make-condition-variable", 0, 0, 0,
1241 "Make a new condition variable.")
1242 #define FUNC_NAME s_scm_make_condition_variable
/* As with make_fat_mutex: waiting starts as '() so the smob is GC-safe
   before the allocating make_queue call.  */
1247 c = scm_gc_malloc (sizeof (fat_cond), "condition variable");
1248 scm_i_pthread_mutex_init (&c->lock, 0);
1249 c->waiting = SCM_EOL;
1250 SCM_NEWSMOB (cv, scm_tc16_condvar, (scm_t_bits) c);
1251 c->waiting = make_queue ();
/* Core wait: release MUTEX, sleep on COND's queue (optionally until
   WAITTIME), then re-acquire MUTEX.  Errors from unlocking a mutex we
   do not own are raised; ETIMEDOUT maps to a #f-style result and other
   errnos to a syserror (see the tail fragments below).  */
1257 fat_cond_timedwait (SCM cond, SCM mutex,
1258 const scm_t_timespec *waittime)
1260 scm_i_thread *t = SCM_I_CURRENT_THREAD;
1261 fat_cond *c = SCM_CONDVAR_DATA (cond);
1262 fat_mutex *m = SCM_MUTEX_DATA (mutex);
/* c->lock is taken before the mutex is released so a signal cannot
   slip in between unlock and sleep.  */
1268 scm_i_scm_pthread_mutex_lock (&c->lock);
1269 msg = fat_mutex_unlock (m);
1273 err = block_self (c->waiting, cond, &c->lock, waittime);
1274 scm_i_pthread_mutex_unlock (&c->lock);
1275 fat_mutex_lock (mutex);
1278 scm_i_pthread_mutex_unlock (&c->lock);
1283 scm_misc_error (NULL, msg, SCM_EOL);
1285 scm_remember_upto_here_2 (cond, mutex);
1289 if (err == ETIMEDOUT)
1294 scm_syserror (NULL);
/* wait-condition-variable: atomically unlock MX, wait on CV (until the
   absolute time T when given), then re-lock MX.  T is either an
   integer number of seconds or a (seconds . microseconds) pair as
   returned by gettimeofday.  Returns #t when signalled, #f on
   timeout.  */
1299 SCM_DEFINE (scm_timed_wait_condition_variable, "wait-condition-variable", 2, 1, 0,
1300 (SCM cv, SCM mx, SCM t),
1301 "Wait until @var{cond-var} has been signalled. While waiting, "
1302 "@var{mutex} is atomically unlocked (as with @code{unlock-mutex}) and "
1303 "is locked again when this function returns. When @var{time} is given, "
1304 "it specifies a point in time where the waiting should be aborted. It "
1305 "can be either a integer as returned by @code{current-time} or a pair "
1306 "as returned by @code{gettimeofday}. When the waiting is aborted the "
1307 "mutex is locked and @code{#f} is returned. When the condition "
1308 "variable is in fact signalled, the mutex is also locked and @code{#t} "
1310 #define FUNC_NAME s_scm_timed_wait_condition_variable
1312 scm_t_timespec waittime, *waitptr = NULL;
1314 SCM_VALIDATE_CONDVAR (1, cv);
1315 SCM_VALIDATE_MUTEX (2, mx);
1317 if (!SCM_UNBNDP (t))
1319 if (scm_is_pair (t))
/* A gettimeofday-style pair is (seconds . microseconds): the CAR
   supplies tv_sec and the CDR, scaled by 1000 to nanoseconds,
   supplies tv_nsec.  BUG FIX: this previously read SCM_CAR on both
   lines, reusing the seconds value as the nanosecond count and
   producing a bogus (often invalid, > 1e9 nsec) timeout.  */
1321 waittime.tv_sec = scm_to_ulong (SCM_CAR (t));
1322 waittime.tv_nsec = scm_to_ulong (SCM_CDR (t)) * 1000;
/* A bare integer means whole seconds since the epoch.  */
1326 waittime.tv_sec = scm_to_ulong (t);
1327 waittime.tv_nsec = 0;
1329 waitptr = &waittime;
1332 return scm_from_bool (fat_cond_timedwait (cv, mx, waitptr));
1337 fat_cond_signal (fat_cond *c)
1339 scm_i_scm_pthread_mutex_lock (&c->lock);
1340 unblock_from_queue (c->waiting);
1341 scm_i_pthread_mutex_unlock (&c->lock);
/* Scheme primitive: wake one waiter on condition variable CV.
   NOTE(review): the `(SCM cv),` argument line, the function braces and the
   `return SCM_BOOL_T;` / `#undef FUNC_NAME` lines are not visible in this
   extract — verify against the upstream file. */
SCM_DEFINE (scm_signal_condition_variable, "signal-condition-variable", 1, 0, 0,
	    "Wake up one thread that is waiting for @var{cv}")
#define FUNC_NAME s_scm_signal_condition_variable
  /* Type-check CV, then delegate to the fat-condvar implementation. */
  SCM_VALIDATE_CONDVAR (1, cv);
  fat_cond_signal (SCM_CONDVAR_DATA (cv));
1356 fat_cond_broadcast (fat_cond *c)
1358 scm_i_scm_pthread_mutex_lock (&c->lock);
1359 while (scm_is_true (unblock_from_queue (c->waiting)))
1361 scm_i_pthread_mutex_unlock (&c->lock);
/* Scheme primitive: wake all waiters on condition variable CV.
   NOTE(review): the `(SCM cv),` argument line, the function braces and the
   `return SCM_BOOL_T;` / `#undef FUNC_NAME` lines are not visible in this
   extract — verify against the upstream file. */
SCM_DEFINE (scm_broadcast_condition_variable, "broadcast-condition-variable", 1, 0, 0,
	    "Wake up all threads that are waiting for @var{cv}. ")
#define FUNC_NAME s_scm_broadcast_condition_variable
  /* Type-check CV, then delegate to the fat-condvar implementation. */
  SCM_VALIDATE_CONDVAR (1, cv);
  fat_cond_broadcast (SCM_CONDVAR_DATA (cv));
1375 /*** Marking stacks */
/* XXX - what to do with this?  Do we need to handle this for blocked
   threads as well?  */
/* On IA64 the CPU spills general registers to a separate "register
   backing store" region, which the GC must scan in addition to the
   ordinary stack: capture the current context, mark the saved machine
   context, then mark the backing store between the thread's recorded
   base and the current backing-store pointer (ar.bsp).
   NOTE(review): the opening `#if defined __ia64__`, the `ucontext_t ctx;`
   declaration, the `#else` and the closing `#endif` are not visible in
   this extract — verify against the upstream file. */
# define SCM_MARK_BACKING_STORE() do { \
    SCM_STACKITEM * top, * bot; \
    getcontext (&ctx); \
    scm_mark_locations ((SCM_STACKITEM *) &ctx.uc_mcontext, \
      ((size_t) (sizeof (SCM_STACKITEM) - 1 + sizeof ctx.uc_mcontext) \
       / sizeof (SCM_STACKITEM))); \
    bot = (SCM_STACKITEM *) SCM_I_CURRENT_THREAD->register_backing_store_base; \
    top = (SCM_STACKITEM *) scm_ia64_ar_bsp (&ctx); \
    scm_mark_locations (bot, top - bot); } while (0)
/* Non-IA64 platforms have no separate backing store: no-op. */
# define SCM_MARK_BACKING_STORE()
/* Mark, for the garbage collector, the C stack and saved register state
   of every known thread.  Called during GC while all other threads are
   suspended (see scm_i_thread_put_to_sleep).
   NOTE(review): the return-type line, the `scm_i_thread *t;` declaration,
   the loop braces and the `#else`/`#endif` of the SCM_STACK_GROWS_UP
   conditional are not visible in this extract — verify against the
   upstream file. */
scm_threads_mark_stacks (void)
  for (t = all_threads; t; t = t->next_thread)
      /* Check that thread has indeed been suspended.  */
      /* Keep the thread's own handle (its SMOB) alive. */
      scm_gc_mark (t->handle);
#if SCM_STACK_GROWS_UP
      /* Upward-growing stack: scan from base up to the recorded top. */
      scm_mark_locations (t->base, t->top - t->base);
      /* Downward-growing stack: scan from the recorded top up to base. */
      scm_mark_locations (t->top, t->base - t->top);
      /* The registers saved at suspension may hold the only live
	 references to some objects, so scan them as stack items too. */
      scm_mark_locations ((void *) &t->regs,
			  ((size_t) sizeof(t->regs)
			   / sizeof (SCM_STACKITEM)));
      /* IA64 only: also scan the register backing store. */
      SCM_MARK_BACKING_STORE ();
/* A select() that is safe to call from guile mode: the calling thread
   leaves guile mode around the blocking call (so GC can proceed), and
   the thread's private "sleep pipe" is added to the read set so another
   thread can interrupt the wait by writing to the pipe (async wakeup).
   NOTE(review): the return-type line, braces, the declarations of
   my_readfds/dummy/count, the async-delivery body of the while loop, the
   errno save/restore using `eno` and the final return are not visible in
   this extract — verify against the upstream file. */
scm_std_select (int nfds,
		SELECT_TYPE *readfds,
		SELECT_TYPE *writefds,
		SELECT_TYPE *exceptfds,
		struct timeval *timeout)
  int res, eno, wakeup_fd;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  scm_t_guile_ticket ticket;
  /* We watch the wakeup fd through the read set, so we need one even
     when the caller passed NULL. */
  if (readfds == NULL)
      FD_ZERO (&my_readfds);
      readfds = &my_readfds;
  /* Register the sleep pipe; loops while pending asyncs must first be
     delivered before we may block. */
  while (scm_i_setup_sleep (t, SCM_BOOL_F, NULL, t->sleep_pipe[1]))
  wakeup_fd = t->sleep_pipe[0];
  /* Leave guile mode for the duration of the blocking select. */
  ticket = scm_leave_guile ();
  FD_SET (wakeup_fd, readfds);
  /* nfds must cover the wakeup fd as well. */
  if (wakeup_fd >= nfds)
  res = select (nfds, readfds, writefds, exceptfds, timeout);
  scm_enter_guile (ticket);
  scm_i_reset_sleep (t);
  /* If we were woken through the pipe, drain the byte and hide the
     wakeup fd from the caller's result set. */
  if (res > 0 && FD_ISSET (wakeup_fd, readfds))
      count = read (wakeup_fd, &dummy, 1);
      FD_CLR (wakeup_fd, readfds);
1474 /* Convenience API for blocking while in guile mode. */
1476 #if SCM_USE_PTHREAD_THREADS
1479 scm_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
1481 scm_t_guile_ticket t = scm_leave_guile ();
1482 int res = scm_i_pthread_mutex_lock (mutex);
1483 scm_enter_guile (t);
1488 do_unlock (void *data)
1490 scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t *)data);
1494 scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
1496 scm_i_scm_pthread_mutex_lock (mutex);
1497 scm_dynwind_unwind_handler (do_unlock, mutex, SCM_F_WIND_EXPLICITLY);
1501 scm_pthread_cond_wait (scm_i_pthread_cond_t *cond, scm_i_pthread_mutex_t *mutex)
1503 scm_t_guile_ticket t = scm_leave_guile ();
1504 int res = scm_i_pthread_cond_wait (cond, mutex);
1505 scm_enter_guile (t);
1510 scm_pthread_cond_timedwait (scm_i_pthread_cond_t *cond,
1511 scm_i_pthread_mutex_t *mutex,
1512 const scm_t_timespec *wt)
1514 scm_t_guile_ticket t = scm_leave_guile ();
1515 int res = scm_i_pthread_cond_timedwait (cond, mutex, wt);
1516 scm_enter_guile (t);
1523 scm_std_usleep (unsigned long usecs)
1526 tv.tv_usec = usecs % 1000000;
1527 tv.tv_sec = usecs / 1000000;
1528 scm_std_select (0, NULL, NULL, NULL, &tv);
1529 return tv.tv_sec * 1000000 + tv.tv_usec;
1533 scm_std_sleep (unsigned int secs)
1538 scm_std_select (0, NULL, NULL, NULL, &tv);
/* Scheme primitive: return the thread object of the calling thread.
   NOTE(review): the empty argument line `(void),`, braces and
   `#undef FUNC_NAME` are not visible in this extract. */
SCM_DEFINE (scm_current_thread, "current-thread", 0, 0, 0,
	    "Return the thread that called this function.")
#define FUNC_NAME s_scm_current_thread
  /* Every thread's scm_i_thread record carries its SCM handle. */
  return SCM_I_CURRENT_THREAD->handle;
1554 scm_c_make_list (size_t n, SCM fill)
1558 res = scm_cons (fill, res);
/* Scheme primitive: return a list of all live threads.
   NOTE(review): the `(void),` argument line, the `scm_i_thread *t;`
   declaration, `l = &list;`, the loop braces and `n--`, and the tail
   `*l = SCM_EOL; ... return list;` are not visible in this extract —
   verify against the upstream file. */
SCM_DEFINE (scm_all_threads, "all-threads", 0, 0, 0,
	    "Return a list of all threads.")
#define FUNC_NAME s_scm_all_threads
  /* We can not allocate while holding the thread_admin_mutex because
     of the way GC is done — so the result list is preallocated here,
     then filled in below while the mutex is held.  */
  int n = thread_count;
  SCM list = scm_c_make_list (n, SCM_UNSPECIFIED), *l;
  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  /* Walk at most N threads; the count may have shrunk since it was
     sampled, hence the `n > 0` guard. */
  for (t = all_threads; t && n > 0; t = t->next_thread)
      SCM_SETCAR (*l, t->handle);
      l = SCM_CDRLOC (*l);
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
/* Scheme primitive: #t iff THREAD has terminated.
   NOTE(review): the `(SCM thread),` argument line, braces and
   `#undef FUNC_NAME` are not visible in this extract. */
SCM_DEFINE (scm_thread_exited_p, "thread-exited?", 1, 0, 0,
	    "Return @code{#t} iff @var{thread} has exited.\n")
#define FUNC_NAME s_scm_thread_exited_p
  /* Delegate to the C-level predicate and box the result. */
  return scm_from_bool (scm_c_thread_exited_p (thread));
/* C-level predicate: nonzero iff THREAD has terminated.  Validates its
   argument like the Scheme primitive it backs (same FUNC_NAME).
   NOTE(review): the `int` return-type line, the `scm_i_thread *t;`
   declaration, the `return t->exited;` and `#undef FUNC_NAME` are not
   visible in this extract — verify against the upstream file. */
scm_c_thread_exited_p (SCM thread)
#define FUNC_NAME s_scm_thread_exited_p
  SCM_VALIDATE_THREAD (1, thread);
  t = SCM_I_THREAD_DATA (thread);
/* Condition variable on which threads suspended for GC wait to be woken
   (see scm_i_thread_sleep_for_gc / scm_i_thread_wake_up). */
static scm_i_pthread_cond_t wake_up_cond;
/* Set to 1 by scm_i_thread_put_to_sleep to ask other threads to stop;
   cleared again once all heap mutexes have been acquired. */
int scm_i_thread_go_to_sleep;
/* Becomes 1 in scm_init_threads; guards the sleep/wake machinery so it
   is a no-op before the thread system is up. */
static int threads_initialized_p = 0;
/* Stop-the-world for GC: ask every thread to go to sleep and acquire
   each thread's heap mutex, which only succeeds once that thread has
   actually stopped.  thread_admin_mutex stays held until the matching
   scm_i_thread_wake_up releases it.
   NOTE(review): the return-type line, braces, the `scm_i_thread *t;`
   declaration and the scm_leave_guile call preceding the lock are not
   visible in this extract — verify against the upstream file. */
scm_i_thread_put_to_sleep ()
  if (threads_initialized_p)
      scm_i_pthread_mutex_lock (&thread_admin_mutex);
      /* Signal all threads to go to sleep.  */
      scm_i_thread_go_to_sleep = 1;
      for (t = all_threads; t; t = t->next_thread)
	scm_i_pthread_mutex_lock (&t->heap_mutex);
      /* All heap mutexes held: everyone is stopped, drop the flag. */
      scm_i_thread_go_to_sleep = 0;
/* Flag every thread except the current one so that it clears its
   freelists before the next allocation (set after GC has run).
   NOTE(review): the return-type line, braces and the `scm_i_thread *t;`
   declaration are not visible in this extract. */
scm_i_thread_invalidate_freelists ()
  /* thread_admin_mutex is already locked. */
  for (t = all_threads; t; t = t->next_thread)
    if (t != SCM_I_CURRENT_THREAD)
      t->clear_freelists_p = 1;
/* Undo scm_i_thread_put_to_sleep: broadcast the wake-up condition,
   release every thread's heap mutex, release the admin mutex, and
   re-enter guile mode in the current thread.
   NOTE(review): the return-type line, braces and the `scm_i_thread *t;`
   declaration are not visible in this extract. */
scm_i_thread_wake_up ()
  if (threads_initialized_p)
      scm_i_pthread_cond_broadcast (&wake_up_cond);
      for (t = all_threads; t; t = t->next_thread)
	scm_i_pthread_mutex_unlock (&t->heap_mutex);
      scm_i_pthread_mutex_unlock (&thread_admin_mutex);
      /* The ticket cast mirrors what scm_leave_guile produced earlier in
	 scm_i_thread_put_to_sleep. */
      scm_enter_guile ((scm_t_guile_ticket) SCM_I_CURRENT_THREAD);
/* Called by a thread that noticed scm_i_thread_go_to_sleep: suspend
   itself and block on wake_up_cond (which atomically releases its heap
   mutex) until scm_i_thread_wake_up broadcasts.
   NOTE(review): the return-type line, braces and the matching
   `resume (t);` after the wait are not visible in this extract. */
scm_i_thread_sleep_for_gc ()
  scm_i_thread *t = suspend ();
  scm_i_pthread_cond_wait (&wake_up_cond, &t->heap_mutex);
/* This mutex is used by SCM_CRITICAL_SECTION_START/END.  */
scm_i_pthread_mutex_t scm_i_critical_section_mutex;

/* Default (recursive) mutex used by scm_dynwind_critical_section when
   the caller passes #f; created in scm_init_threads. */
static SCM dynwind_critical_section_mutex;
1672 scm_dynwind_critical_section (SCM mutex)
1674 if (scm_is_false (mutex))
1675 mutex = dynwind_critical_section_mutex;
1676 scm_dynwind_lock_mutex (mutex);
1677 scm_dynwind_block_asyncs ();
/*** Initialization */

/* Per-thread freelist keys and the pointers through which other files
   reach them. */
scm_i_pthread_key_t scm_i_freelist, scm_i_freelist2;
scm_i_pthread_key_t *scm_i_freelist_ptr = &scm_i_freelist;
scm_i_pthread_key_t *scm_i_freelist2_ptr = &scm_i_freelist2;
/* General-purpose mutex for miscellaneous short critical sections. */
scm_i_pthread_mutex_t scm_i_misc_mutex;
/* NOTE(review): the matching `#endif` for this conditional is not
   visible in this extract — verify against the upstream file. */
#if SCM_USE_PTHREAD_THREADS
/* Attribute used to create recursive mutexes; array-of-1 so it can be
   passed directly where a pointer is expected. */
pthread_mutexattr_t scm_i_pthread_mutexattr_recursive[1];
/* Very early thread-system setup, run before the heap exists: create the
   recursive mutex attribute, the global mutexes, the GC wake-up cond and
   the freelist keys, then turn the boot stack (BASE) into the first
   scm_i_thread via guilify_self_1.
   NOTE(review): the return-type line, braces and the `#endif` matching
   the SCM_USE_PTHREAD_THREADS conditional are not visible in this
   extract — verify against the upstream file. */
scm_threads_prehistory (SCM_STACKITEM *base)
#if SCM_USE_PTHREAD_THREADS
  /* Must happen before any recursive mutex is created below. */
  pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive);
  pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive,
			     PTHREAD_MUTEX_RECURSIVE);
  /* Recursive, because critical sections may nest. */
  scm_i_pthread_mutex_init (&scm_i_critical_section_mutex,
			    scm_i_pthread_mutexattr_recursive);
  scm_i_pthread_mutex_init (&scm_i_misc_mutex, NULL);
  scm_i_pthread_cond_init (&wake_up_cond, NULL);
  scm_i_pthread_key_create (&scm_i_freelist, NULL);
  scm_i_pthread_key_create (&scm_i_freelist2, NULL);
  guilify_self_1 (base);
/* SMOB type tags for the three thread-related object types; assigned in
   scm_init_threads below. */
scm_t_bits scm_tc16_thread;
scm_t_bits scm_tc16_mutex;
scm_t_bits scm_tc16_condvar;
/* Body of scm_init_threads: register the thread, mutex and condvar SMOB
   types with their mark/print/free hooks, finish initialising the first
   thread, and create the default critical-section mutex.
   NOTE(review): the `void scm_init_threads (...)` header line, braces,
   and the `sizeof (fat_cond));` continuation of the condition-variable
   scm_make_smob_type call are not visible in this extract — verify
   against the upstream file. */
  scm_tc16_thread = scm_make_smob_type ("thread", sizeof (scm_i_thread));
  scm_set_smob_mark (scm_tc16_thread, thread_mark);
  scm_set_smob_print (scm_tc16_thread, thread_print);
  scm_set_smob_free (scm_tc16_thread, thread_free);
  scm_tc16_mutex = scm_make_smob_type ("mutex", sizeof (fat_mutex));
  scm_set_smob_mark (scm_tc16_mutex, fat_mutex_mark);
  scm_set_smob_print (scm_tc16_mutex, fat_mutex_print);
  scm_set_smob_free (scm_tc16_mutex, fat_mutex_free);
  scm_tc16_condvar = scm_make_smob_type ("condition-variable",
  scm_set_smob_mark (scm_tc16_condvar, fat_cond_mark);
  scm_set_smob_print (scm_tc16_condvar, fat_cond_print);
  scm_set_smob_free (scm_tc16_condvar, fat_cond_free);
  /* Placeholder until scm_init_threads_default_dynamic_state runs. */
  scm_i_default_dynamic_state = SCM_BOOL_F;
  guilify_self_2 (SCM_BOOL_F);
  /* From here on the GC sleep/wake machinery is active. */
  threads_initialized_p = 1;
  /* Recursive, and protected from GC, since it lives for the whole
     session. */
  dynwind_critical_section_mutex =
    scm_permanent_object (scm_make_recursive_mutex ());
1744 scm_init_threads_default_dynamic_state ()
1746 SCM state = scm_make_dynamic_state (scm_current_dynamic_state ());
1747 scm_i_default_dynamic_state = scm_permanent_object (state);
/* Register all the SCM_DEFINE'd thread primitives with the interpreter;
   threads.x is generated from this file by the snarfer at build time.
   NOTE(review): the return-type line and braces are not visible in this
   extract. */
scm_init_thread_procs ()
#include "libguile/threads.x"