fix clang V2
parent eed403ea0c
commit 7d0b6e781d
1 changed file with 1265 additions and 1221 deletions
@@ -50,14 +50,21 @@ to create, resume, yield or destroy a coroutine.
 
 # Caveats
 
-- Avoid using coroutines with C++ exceptions, this is not recommended, it may not behave as you expect.
-- When using C++ RAII (i.e. destructors) you must resume the coroutine until it dies to properly execute all destructors.
+- Avoid using coroutines with C++ exceptions, this is not recommended, it may not behave as you
+expect.
+- When using C++ RAII (i.e. destructors) you must resume the coroutine until it dies to properly
+execute all destructors.
 - Some unsupported sanitizers for C may trigger false warnings when using coroutines.
-- The `mco_coro` object is not thread safe, you should use a mutex for manipulating it in multithread applications.
-- To use in multithread applications, you must compile with C compiler that supports `thread_local` qualifier.
-- Avoid using `thread_local` inside coroutine code, the compiler may cache thread local variables pointers which can be invalid when a coroutine switch threads.
-- Stack space is limited. By default it has 56KB of space, this can be changed on coroutine creation, or by enabling the virtual memory backed allocator to make it 2040KB.
-- Take care to not cause stack overflows (run out of stack space), otherwise your program may crash or not, the behavior is undefined.
+- The `mco_coro` object is not thread safe, you should use a mutex for manipulating it in
+multithread applications.
+- To use in multithread applications, you must compile with C compiler that supports `thread_local`
+qualifier.
+- Avoid using `thread_local` inside coroutine code, the compiler may cache thread local variables
+pointers which can be invalid when a coroutine switch threads.
+- Stack space is limited. By default it has 56KB of space, this can be changed on coroutine
+creation, or by enabling the virtual memory backed allocator to make it 2040KB.
+- Take care to not cause stack overflows (run out of stack space), otherwise your program may crash
+or not, the behavior is undefined.
 - On WebAssembly you must compile with Emscripten flag `-s ASYNCIFY=1`.
 - The WebAssembly Binaryen asyncify method can be used when explicitly enabled,
 you may want to do this only to use minicoro with WebAssembly native interpreters
@@ -72,7 +79,8 @@ a coroutine only suspends its execution by explicitly calling a yield function.
 
 You create a coroutine by calling `mco_create`.
 Its sole argument is a `mco_desc` structure with a description for the coroutine.
-The `mco_create` function only creates a new coroutine and returns a handle to it, it does not start the coroutine.
+The `mco_create` function only creates a new coroutine and returns a handle to it, it does not start
+the coroutine.
 
 You execute a coroutine by calling `mco_resume`.
 When calling a resume function the coroutine starts its execution by calling its body function.
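As a point of reference for the flow described above, a minimal create/resume/destroy round trip might look like the sketch below. It is illustrative only; it assumes the API declared in this header and `MCO_SUCCESS` as the success result code.

```c
#define MINICORO_IMPL
#include "minicoro.h"
#include <assert.h>

/* Coroutine body: runs across resumes until it returns, then the coroutine is MCO_DEAD. */
static void entry(mco_coro* co) {
  (void)co;
}

int main(void) {
  mco_desc desc = mco_desc_init(entry, 0);       /* 0 -> MCO_DEFAULT_STACK_SIZE. */
  mco_coro* co = NULL;
  assert(mco_create(&co, &desc) == MCO_SUCCESS); /* Allocates and initializes, does not start. */
  assert(mco_resume(co) == MCO_SUCCESS);         /* Runs entry() to completion. */
  assert(mco_status(co) == MCO_DEAD);
  assert(mco_destroy(co) == MCO_SUCCESS);
  return 0;
}
```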
@@ -81,7 +89,8 @@ After the coroutine starts running, it runs until it terminates or yields.
 A coroutine yields by calling `mco_yield`.
 When a coroutine yields, the corresponding resume returns immediately,
 even if the yield happens inside nested function calls (that is, not in the main function).
-The next time you resume the same coroutine, it continues its execution from the point where it yielded.
+The next time you resume the same coroutine, it continues its execution from the point where it
+yielded.
 
 To associate a persistent value with the coroutine,
 you can optionally set `user_data` on its creation and later retrieve with `mco_get_user_data`.
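A small sketch of the yield-from-a-nested-call behavior and the `user_data` round trip described above (illustrative; assumes the API declared in this header):

```c
#define MINICORO_IMPL
#include "minicoro.h"
#include <assert.h>

static void helper(mco_coro* co) {
  mco_yield(co); /* Yielding here still returns control to the caller of mco_resume. */
}

static void entry(mco_coro* co) {
  int* counter = (int*)mco_get_user_data(co); /* Persistent value set at creation. */
  (*counter)++;
  helper(co);   /* Suspends from a nested call. */
  (*counter)++;
}

int main(void) {
  int counter = 0;
  mco_desc desc = mco_desc_init(entry, 0);
  desc.user_data = &counter;
  mco_coro* co = NULL;
  assert(mco_create(&co, &desc) == MCO_SUCCESS);
  mco_resume(co);                                          /* Runs until the nested yield. */
  assert(counter == 1 && mco_status(co) == MCO_SUSPENDED);
  mco_resume(co);                                          /* Continues right after the yield. */
  assert(counter == 2 && mco_status(co) == MCO_DEAD);
  mco_destroy(co);
  return 0;
}
```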
@@ -89,7 +98,8 @@ you can optionally set `user_data` on its creation and later retrieve with `mco
 To pass values between resume and yield,
 you can optionally use `mco_push` and `mco_pop` APIs,
 they are intended to pass temporary values using a LIFO style buffer.
-The storage system can also be used to send and receive initial values on coroutine creation or before it finishes.
+The storage system can also be used to send and receive initial values on coroutine creation or
+before it finishes.
 
 # Usage
 
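A minimal sketch of the LIFO storage flow described above, pushing an initial value in before the first resume and reading a result back out afterwards (illustrative only):

```c
#define MINICORO_IMPL
#include "minicoro.h"
#include <assert.h>

static void entry(mco_coro* co) {
  int in = 0;
  mco_pop(co, &in, sizeof(in));    /* Receive the value pushed before the first resume. */
  int out = in * 2;
  mco_push(co, &out, sizeof(out)); /* Send a value back through the LIFO buffer. */
  mco_yield(co);
}

int main(void) {
  mco_desc desc = mco_desc_init(entry, 0);
  mco_coro* co = NULL;
  assert(mco_create(&co, &desc) == MCO_SUCCESS);
  int in = 21;
  mco_push(co, &in, sizeof(in));   /* Initial value, sent before the coroutine starts. */
  mco_resume(co);
  int out = 0;
  mco_pop(co, &out, sizeof(out));
  assert(out == 42);
  mco_destroy(co);                 /* Allowed while suspended or dead. */
  return 0;
}
```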
@@ -108,9 +118,9 @@ The following simple example demonstrates on how to use the library:
 
 ```c
 #define MINICORO_IMPL
-#include "minicoro.h"
-#include <stdio.h>
 #include <assert.h>
+#include <stdio.h>
+#include "minicoro.h"
 
 // Coroutine entry function.
 void coro_entry(mco_coro* co) {
@@ -181,8 +191,9 @@ for example, an application with 100 thousands coroutine with stacks of 56KB wou
 as 5GB of memory, however your application may not really full stack usage for every coroutine.
 
 Some developers often prefer stackless coroutines over stackful coroutines
-because of this problem, stackless memory footprint is low, therefore often considered more lightweight.
-However stackless have many other limitations, like you cannot run unconstrained code inside them.
+because of this problem, stackless memory footprint is low, therefore often considered more
+lightweight. However stackless have many other limitations, like you cannot run unconstrained code
+inside them.
 
 One remedy to the solution is to make stackful coroutines growable,
 to only use physical memory on demand when its really needed,
@@ -192,19 +203,21 @@ when supported by the operating system.
 The virtual memory backed allocator will reserve virtual memory in the OS for each coroutine stack,
 but not trigger real physical memory usage yet.
 While the application virtual memory usage will be high,
-the physical memory usage will be low and actually grow on demand (usually every 4KB chunk in Linux).
+the physical memory usage will be low and actually grow on demand (usually every 4KB chunk in
+Linux).
 
 The virtual memory backed allocator also raises the default stack size to about 2MB,
 typically the size of extra threads in Linux,
 so you have more space in your coroutines and the risk of stack overflow is low.
 
 As an example, allocating 100 thousands coroutines with nearly 2MB stack reserved space
-with the virtual memory allocator uses 783MB of physical memory usage, that is about 8KB per coroutine,
-however the virtual memory usage will be at 98GB.
+with the virtual memory allocator uses 783MB of physical memory usage, that is about 8KB per
+coroutine, however the virtual memory usage will be at 98GB.
 
 It is recommended to enable this option only if you plan to spawn thousands of coroutines
 while wanting to have a low memory footprint.
-Not all environments have an OS with virtual memory support, therefore this option is disabled by default.
+Not all environments have an OS with virtual memory support, therefore this option is disabled by
+default.
 
 This option may add an order of magnitude overhead to `mco_create()`/`mco_destroy()`,
 because they will request the OS to manage virtual memory page tables,
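A sketch of how the option described above is enabled in practice; it is a build-time define, so no API changes are involved (assumes an OS with virtual memory support):

```c
#define MCO_USE_VMEM_ALLOCATOR /* Reserve virtual memory per stack; commit physical pages on demand. */
#define MINICORO_IMPL
#include "minicoro.h"
#include <assert.h>

static void entry(mco_coro* co) { mco_yield(co); }

int main(void) {
  /* With the option above, a 0 stack size picks the ~2MB (2040KB) default,
     but only pages the coroutine actually touches consume physical memory. */
  mco_desc desc = mco_desc_init(entry, 0);
  mco_coro* co = NULL;
  assert(mco_create(&co, &desc) == MCO_SUCCESS); /* Slower than the default allocator: it asks the OS to map pages. */
  mco_resume(co);
  mco_destroy(co);
  return 0;
}
```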
@@ -215,17 +228,23 @@ if this is a problem for you, please customize a custom allocator for your own n
 The following can be defined to change the library behavior:
 
 - `MCO_API` - Public API qualifier. Default is `extern`.
-- `MCO_MIN_STACK_SIZE` - Minimum stack size when creating a coroutine. Default is 32768 (32KB).
+- `MCO_MIN_STACK_SIZE` - Minimum stack size when creating a coroutine. Default is 32768
+(32KB).
 - `MCO_DEFAULT_STORAGE_SIZE` - Size of coroutine storage buffer. Default is 1024.
-- `MCO_DEFAULT_STACK_SIZE` - Default stack size when creating a coroutine. Default is 57344 (56KB). When `MCO_USE_VMEM_ALLOCATOR` is true the default is 2040KB (nearly 2MB).
+- `MCO_DEFAULT_STACK_SIZE` - Default stack size when creating a coroutine. Default is 57344
+(56KB). When `MCO_USE_VMEM_ALLOCATOR` is true the default is 2040KB (nearly 2MB).
 - `MCO_ALLOC` - Default allocation function. Default is `calloc`.
 - `MCO_DEALLOC` - Default deallocation function. Default is `free`.
-- `MCO_USE_VMEM_ALLOCATOR` - Use virtual memory backed allocator, improving memory footprint per coroutine.
+- `MCO_USE_VMEM_ALLOCATOR` - Use virtual memory backed allocator, improving memory footprint per
+coroutine.
 - `MCO_NO_DEFAULT_ALLOCATOR` - Disable the default allocator using `MCO_ALLOC` and `MCO_DEALLOC`.
-- `MCO_ZERO_MEMORY` - Zero memory of stack when poping storage, intended for garbage collected environments.
-- `MCO_DEBUG` - Enable debug mode, logging any runtime error to stdout. Defined automatically unless `NDEBUG` or `MCO_NO_DEBUG` is defined.
+- `MCO_ZERO_MEMORY` - Zero memory of stack when poping storage, intended for garbage
+collected environments.
+- `MCO_DEBUG` - Enable debug mode, logging any runtime error to stdout. Defined
+automatically unless `NDEBUG` or `MCO_NO_DEBUG` is defined.
 - `MCO_NO_DEBUG` - Disable debug mode.
-- `MCO_NO_MULTITHREAD` - Disable multithread usage. Multithread is supported when `thread_local` is supported.
+- `MCO_NO_MULTITHREAD` - Disable multithread usage. Multithread is supported when
+`thread_local` is supported.
 - `MCO_USE_ASM` - Force use of assembly context switch implementation.
 - `MCO_USE_UCONTEXT` - Force use of ucontext context switch implementation.
 - `MCO_USE_FIBERS` - Force use of fibers context switch implementation.
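For illustration, the options above are plain defines placed before the `MINICORO_IMPL` include so the implementation sees them; the values below are hypothetical, not recommendations:

```c
/* Options must come before the implementation include. */
#define MCO_DEFAULT_STACK_SIZE (120 * 1024) /* Hypothetical 120KB default stack. */
#define MCO_DEFAULT_STORAGE_SIZE 256        /* Smaller per-coroutine storage buffer. */
#define MCO_ZERO_MEMORY                     /* Zero storage when popping, e.g. for GC'd hosts. */
#define MINICORO_IMPL
#include "minicoro.h"

int main(void) { return 0; }
```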
@@ -237,7 +256,6 @@ The following can be defined to change the library behavior:
 Your choice of either Public Domain or MIT No Attribution, see end of file.
 */
 
-
 #ifndef MINICORO_H
 #define MINICORO_H
 
@@ -262,9 +280,11 @@ extern "C" {
 /* Coroutine states. */
 typedef enum mco_state {
 MCO_DEAD = 0, /* The coroutine has finished normally or was uninitialized before finishing. */
-MCO_NORMAL, /* The coroutine is active but not running (that is, it has resumed another coroutine). */
+MCO_NORMAL, /* The coroutine is active but not running (that is, it has resumed another
+coroutine). */
 MCO_RUNNING, /* The coroutine is active and running. */
-MCO_SUSPENDED /* The coroutine is suspended (in a call to yield, or it has not started running yet). */
+MCO_SUSPENDED /* The coroutine is suspended (in a call to yield, or it has not started running
+yet). */
 } mco_state;
 
 /* Coroutine result codes. */
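A small sketch of driving a coroutine by the states above, resuming while it reports `MCO_SUSPENDED` until it reaches `MCO_DEAD` (illustrative only):

```c
#define MINICORO_IMPL
#include "minicoro.h"
#include <assert.h>

static void entry(mco_coro* co) {
  for (int i = 0; i < 3; ++i)
    mco_yield(co); /* Each yield leaves the coroutine in MCO_SUSPENDED. */
}

int main(void) {
  mco_desc desc = mco_desc_init(entry, 0);
  mco_coro* co = NULL;
  assert(mco_create(&co, &desc) == MCO_SUCCESS);
  /* Resume until the coroutine finishes; this also runs any pending cleanup in its body. */
  while (mco_status(co) == MCO_SUSPENDED)
    mco_resume(co);
  assert(mco_status(co) == MCO_DEAD);
  mco_destroy(co);
  return 0;
}
```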
@@ -312,7 +332,8 @@ typedef struct mco_desc {
 void* user_data; /* Coroutine user data, can be get with `mco_get_user_data`. */
 /* Custom allocation interface. */
 void* (*alloc_cb)(size_t size, void* allocator_data); /* Custom allocation function. */
-void (*dealloc_cb)(void* ptr, size_t size, void* allocator_data); /* Custom deallocation function. */
+void (*dealloc_cb)(void* ptr, size_t size,
+void* allocator_data); /* Custom deallocation function. */
 void* allocator_data; /* User data pointer passed to `alloc`/`dealloc` allocation functions. */
 size_t storage_size; /* Coroutine storage size, to be used with the storage APIs. */
 /* These must be initialized only through `mco_init_desc`. */
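A sketch of the custom allocation interface above, using a hypothetical counting allocator wired through `alloc_cb`/`dealloc_cb`/`allocator_data`:

```c
#define MINICORO_IMPL
#include "minicoro.h"
#include <assert.h>
#include <stdlib.h>

/* Hypothetical allocator state: counts live allocations made on behalf of the coroutine. */
typedef struct { int live; } alloc_stats;

static void* my_alloc(size_t size, void* allocator_data) {
  ((alloc_stats*)allocator_data)->live += 1;
  return calloc(1, size);
}

static void my_dealloc(void* ptr, size_t size, void* allocator_data) {
  (void)size;
  ((alloc_stats*)allocator_data)->live -= 1;
  free(ptr);
}

static void entry(mco_coro* co) { (void)co; }

int main(void) {
  alloc_stats stats = {0};
  mco_desc desc = mco_desc_init(entry, 0);
  desc.alloc_cb = my_alloc;
  desc.dealloc_cb = my_dealloc;
  desc.allocator_data = &stats;   /* Passed back to both callbacks. */
  mco_coro* co = NULL;
  assert(mco_create(&co, &desc) == MCO_SUCCESS);
  mco_resume(co);
  mco_destroy(co);
  assert(stats.live == 0);        /* Every allocation was released. */
  return 0;
}
```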
@@ -321,21 +342,35 @@ typedef struct mco_desc {
 } mco_desc;
 
 /* Coroutine functions. */
-MCO_API mco_desc mco_desc_init(void (*func)(mco_coro* co), size_t stack_size); /* Initialize description of a coroutine. When stack size is 0 then MCO_DEFAULT_STACK_SIZE is used. */
+MCO_API mco_desc
+mco_desc_init(void (*func)(mco_coro* co),
+size_t stack_size); /* Initialize description of a coroutine. When stack size is 0
+then MCO_DEFAULT_STACK_SIZE is used. */
 MCO_API mco_result mco_init(mco_coro* co, mco_desc* desc); /* Initialize the coroutine. */
-MCO_API mco_result mco_uninit(mco_coro* co); /* Uninitialize the coroutine, may fail if it's not dead or suspended. */
-MCO_API mco_result mco_create(mco_coro** out_co, mco_desc* desc); /* Allocates and initializes a new coroutine. */
-MCO_API mco_result mco_destroy(mco_coro* co); /* Uninitialize and deallocate the coroutine, may fail if it's not dead or suspended. */
-MCO_API mco_result mco_resume(mco_coro* co); /* Starts or continues the execution of the coroutine. */
+MCO_API mco_result
+mco_uninit(mco_coro* co); /* Uninitialize the coroutine, may fail if it's not dead or suspended. */
+MCO_API mco_result mco_create(mco_coro** out_co,
+mco_desc* desc); /* Allocates and initializes a new coroutine. */
+MCO_API mco_result mco_destroy(mco_coro* co); /* Uninitialize and deallocate the coroutine, may fail
+if it's not dead or suspended. */
+MCO_API mco_result
+mco_resume(mco_coro* co); /* Starts or continues the execution of the coroutine. */
 MCO_API mco_result mco_yield(mco_coro* co); /* Suspends the execution of a coroutine. */
 MCO_API mco_state mco_status(mco_coro* co); /* Returns the status of the coroutine. */
-MCO_API void* mco_get_user_data(mco_coro* co); /* Get coroutine user data supplied on coroutine creation. */
+MCO_API void* mco_get_user_data(
+mco_coro* co); /* Get coroutine user data supplied on coroutine creation. */
 
 /* Storage interface functions, used to pass values between yield and resume. */
-MCO_API mco_result mco_push(mco_coro* co, const void* src, size_t len); /* Push bytes to the coroutine storage. Use to send values between yield and resume. */
-MCO_API mco_result mco_pop(mco_coro* co, void* dest, size_t len); /* Pop bytes from the coroutine storage. Use to get values between yield and resume. */
-MCO_API mco_result mco_peek(mco_coro* co, void* dest, size_t len); /* Like `mco_pop` but it does not consumes the storage. */
-MCO_API size_t mco_get_bytes_stored(mco_coro* co); /* Get the available bytes that can be retrieved with a `mco_pop`. */
+MCO_API mco_result mco_push(mco_coro* co, const void* src,
+size_t len); /* Push bytes to the coroutine storage. Use to send values
+between yield and resume. */
+MCO_API mco_result mco_pop(mco_coro* co, void* dest,
+size_t len); /* Pop bytes from the coroutine storage. Use to get values
+between yield and resume. */
+MCO_API mco_result mco_peek(mco_coro* co, void* dest,
+size_t len); /* Like `mco_pop` but it does not consumes the storage. */
+MCO_API size_t mco_get_bytes_stored(
+mco_coro* co); /* Get the available bytes that can be retrieved with a `mco_pop`. */
 MCO_API size_t mco_get_storage_size(mco_coro* co); /* Get the total storage size. */
 
 /* Misc functions. */
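A short sketch exercising the storage helpers declared above (`mco_get_bytes_stored`, `mco_peek`, `mco_pop`); illustrative only:

```c
#define MINICORO_IMPL
#include "minicoro.h"
#include <assert.h>

static void entry(mco_coro* co) {
  int value = 7;
  mco_push(co, &value, sizeof(value)); /* Leave a value in storage for the caller. */
  mco_yield(co);
}

int main(void) {
  mco_desc desc = mco_desc_init(entry, 0);
  mco_coro* co = NULL;
  assert(mco_create(&co, &desc) == MCO_SUCCESS);
  mco_resume(co);
  assert(mco_get_bytes_stored(co) == sizeof(int)); /* Bytes available to pop. */
  int peeked = 0, popped = 0;
  mco_peek(co, &peeked, sizeof(peeked));           /* Non-destructive read. */
  mco_pop(co, &popped, sizeof(popped));            /* Consumes the storage. */
  assert(peeked == 7 && popped == 7 && mco_get_bytes_stored(co) == 0);
  mco_resume(co);  /* Let the coroutine finish before destroying it. */
  mco_destroy(co);
  return 0;
}
```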
@@ -365,7 +400,8 @@ extern "C" {
 #ifndef MCO_DEFAULT_STACK_SIZE
 /* Use multiples of 64KB minus 8KB, because 8KB is reserved for coroutine internal structures. */
 #ifdef MCO_USE_VMEM_ALLOCATOR
-#define MCO_DEFAULT_STACK_SIZE 2040*1024 /* 2040KB, nearly the same stack size of a thread in x86_64 Linux. */
+#define MCO_DEFAULT_STACK_SIZE \
+2040 * 1024 /* 2040KB, nearly the same stack size of a thread in x86_64 Linux. */
 #else
 #define MCO_DEFAULT_STACK_SIZE 56 * 1024 /* 56KB */
 #endif
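For illustration, the default above only applies when no size is requested; a per-coroutine stack size can still be passed to `mco_desc_init` (sketch, with an arbitrary 1MB request):

```c
#define MINICORO_IMPL
#include "minicoro.h"
#include <assert.h>

static void entry(mco_coro* co) { (void)co; }

int main(void) {
  /* Per-coroutine override: request roughly 1MB instead of the 56KB default. */
  mco_desc desc = mco_desc_init(entry, 1024 * 1024);
  mco_coro* co = NULL;
  assert(mco_create(&co, &desc) == MCO_SUCCESS);
  mco_resume(co);
  mco_destroy(co);
  return 0;
}
```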
@@ -375,7 +411,8 @@ extern "C" {
 #define MCO_MAGIC_NUMBER 0x7E3CB1A9
 
 /* Detect implementation based on OS, arch and compiler. */
-#if !defined(MCO_USE_UCONTEXT) && !defined(MCO_USE_FIBERS) && !defined(MCO_USE_ASM) && !defined(MCO_USE_ASYNCIFY)
+#if !defined(MCO_USE_UCONTEXT) && !defined(MCO_USE_FIBERS) && !defined(MCO_USE_ASM) && \
+!defined(MCO_USE_ASYNCIFY)
 #if defined(_WIN32)
 #if (defined(__GNUC__) && defined(__x86_64__)) || (defined(_MSC_VER) && defined(_M_X64))
 #define MCO_USE_ASM
@@ -390,10 +427,8 @@ extern "C" {
 #define MCO_USE_ASYNCIFY
 #else
 #if __GNUC__ >= 3 /* Assembly extension supported. */
-#if defined(__x86_64__) || \
-defined(__i386) || defined(__i386__) || \
-defined(__ARM_EABI__) || defined(__aarch64__) || \
-defined(__riscv)
+#if defined(__x86_64__) || defined(__i386) || defined(__i386__) || defined(__ARM_EABI__) || \
+defined(__aarch64__) || defined(__riscv)
 #define MCO_USE_ASM
 #else
 #define MCO_USE_UCONTEXT
@@ -436,7 +471,8 @@ extern "C" {
 #define MCO_THREAD_LOCAL thread_local
 #elif __STDC_VERSION__ >= 201112 && !defined(__STDC_NO_THREADS__)
 #define MCO_THREAD_LOCAL _Thread_local
-#elif defined(_WIN32) && (defined(_MSC_VER) || defined(__ICL) || defined(__DMC__) || defined(__BORLANDC__))
+#elif defined(_WIN32) && \
+(defined(_MSC_VER) || defined(__ICL) || defined(__DMC__) || defined(__BORLANDC__))
 #define MCO_THREAD_LOCAL __declspec(thread)
 #elif defined(__GNUC__) || defined(__SUNPRO_C) || defined(__xlC__)
 #define MCO_THREAD_LOCAL __thread
@@ -456,7 +492,8 @@ extern "C" {
 #else
 #define MCO_FORCE_INLINE inline __attribute__((always_inline))
 #endif
-#elif defined(__BORLANDC__) || defined(__DMC__) || defined(__SC__) || defined(__WATCOMC__) || defined(__LCC__) || defined(__DECC)
+#elif defined(__BORLANDC__) || defined(__DMC__) || defined(__SC__) || defined(__WATCOMC__) || \
+defined(__LCC__) || defined(__DECC)
 #define MCO_FORCE_INLINE __inline
 #else /* No inline support. */
 #define MCO_FORCE_INLINE
@@ -512,8 +549,8 @@ extern "C" {
 #else /* C allocator */
 #ifndef MCO_ALLOC
 #include <stdlib.h>
-/* We use calloc() so we give a chance for the OS to reserve virtual memory without really using physical memory,
-calloc() also has the nice property of initializing the stack to zeros. */
+/* We use calloc() so we give a chance for the OS to reserve virtual memory without really using
+physical memory, calloc() also has the nice property of initializing the stack to zeros. */
 #define MCO_ALLOC(size) calloc(1, size)
 #define MCO_DEALLOC(ptr, size) free(ptr)
 #endif
@@ -545,7 +582,8 @@ extern "C" {
 #endif
 #ifdef _MCO_USE_ASAN
 void __sanitizer_start_switch_fiber(void** fake_stack_save, const void* bottom, size_t size);
-void __sanitizer_finish_switch_fiber(void* fake_stack_save, const void **bottom_old, size_t *size_old);
+void __sanitizer_finish_switch_fiber(void* fake_stack_save, const void** bottom_old,
+size_t* size_old);
 #endif
 #ifdef _MCO_USE_TSAN
 void* __tsan_get_current_fiber(void);
@@ -578,7 +616,8 @@ static MCO_FORCE_INLINE void _mco_prepare_jumpin(mco_coro* co) {
 if (prev_co) {
 void* bottom_old = NULL;
 size_t size_old = 0;
-__sanitizer_finish_switch_fiber(prev_co->asan_prev_stack, (const void**)&bottom_old, &size_old);
+__sanitizer_finish_switch_fiber(prev_co->asan_prev_stack, (const void**)&bottom_old,
+&size_old);
 prev_co->asan_prev_stack = NULL;
 }
 __sanitizer_start_switch_fiber(&co->asan_prev_stack, co->stack_base, co->stack_size);
@@ -750,9 +789,12 @@ _MCO_ASM_BLOB static unsigned char _mco_switch_code[] = {
 };
 
 void (*_mco_wrap_main)(void) = (void (*)(void))(void*)_mco_wrap_main_code;
-void (*_mco_switch)(_mco_ctxbuf* from, _mco_ctxbuf* to) = (void(*)(_mco_ctxbuf* from, _mco_ctxbuf* to))(void*)_mco_switch_code;
+void (*_mco_switch)(_mco_ctxbuf* from,
+_mco_ctxbuf* to) = (void (*)(_mco_ctxbuf* from,
+_mco_ctxbuf* to))(void*)_mco_switch_code;
 
-static mco_result _mco_makectx(mco_coro* co, _mco_ctxbuf* ctx, void* stack_base, size_t stack_size) {
+static mco_result _mco_makectx(mco_coro* co, _mco_ctxbuf* ctx, void* stack_base,
+size_t stack_size) {
 stack_size = stack_size - 32; /* Reserve 32 bytes for the shadow space. */
 void** stack_high_ptr = (void**)((size_t)stack_base + stack_size - sizeof(size_t));
 stack_high_ptr[0] = (void*)(0xdeaddeaddeaddead); /* Dummy return address. */
@@ -776,8 +818,7 @@ typedef struct _mco_ctxbuf {
 void _mco_wrap_main(void);
 int _mco_switch(_mco_ctxbuf* from, _mco_ctxbuf* to);
 
-__asm__(
-".text\n"
+__asm__(".text\n"
 #ifdef __MACH__ /* Mac OS X assembler */
 ".globl __mco_wrap_main\n"
 "__mco_wrap_main:\n"
@@ -794,8 +835,7 @@ __asm__(
 #endif
 );
 
-__asm__(
-".text\n"
+__asm__(".text\n"
 #ifdef __MACH__ /* Mac OS assembler */
 ".globl __mco_switch\n"
 "__mco_switch:\n"
@@ -828,8 +868,10 @@ __asm__(
 #endif
 );
 
-static mco_result _mco_makectx(mco_coro* co, _mco_ctxbuf* ctx, void* stack_base, size_t stack_size) {
-stack_size = stack_size - 128; /* Reserve 128 bytes for the Red Zone space (System V AMD64 ABI). */
+static mco_result _mco_makectx(mco_coro* co, _mco_ctxbuf* ctx, void* stack_base,
+size_t stack_size) {
+stack_size =
+stack_size - 128; /* Reserve 128 bytes for the Red Zone space (System V AMD64 ABI). */
 void** stack_high_ptr = (void**)((size_t)stack_base + stack_size - sizeof(size_t));
 stack_high_ptr[0] = (void*)(0xdeaddeaddeaddead); /* Dummy return address. */
 ctx->rip = (void*)(_mco_wrap_main);
@@ -860,19 +902,16 @@ typedef struct _mco_ctxbuf {
 void _mco_wrap_main(void);
 int _mco_switch(_mco_ctxbuf* from, _mco_ctxbuf* to);
 
-__asm__(
-".text\n"
+__asm__(".text\n"
 ".globl _mco_wrap_main\n"
 ".type _mco_wrap_main @function\n"
 ".hidden _mco_wrap_main\n"
 "_mco_wrap_main:\n"
 " mv a0, s0\n"
 " jr s1\n"
-".size _mco_wrap_main, .-_mco_wrap_main\n"
-);
+".size _mco_wrap_main, .-_mco_wrap_main\n");
 
-__asm__(
-".text\n"
+__asm__(".text\n"
 ".globl _mco_switch\n"
 ".type _mco_switch @function\n"
 ".hidden _mco_switch\n"
@@ -1029,10 +1068,10 @@ __asm__(
 #else
 #error "Unsupported RISC-V XLEN"
 #endif /* __riscv_xlen */
-".size _mco_switch, .-_mco_switch\n"
-);
+".size _mco_switch, .-_mco_switch\n");
 
-static mco_result _mco_makectx(mco_coro* co, _mco_ctxbuf* ctx, void* stack_base, size_t stack_size) {
+static mco_result _mco_makectx(mco_coro* co, _mco_ctxbuf* ctx, void* stack_base,
+size_t stack_size) {
 ctx->s[0] = (void*)(co);
 ctx->s[1] = (void*)(_mco_main);
 ctx->pc = (void*)(_mco_wrap_main);
@@ -1088,7 +1127,8 @@ __asm__(
 #endif
 );
 
-static mco_result _mco_makectx(mco_coro* co, _mco_ctxbuf* ctx, void* stack_base, size_t stack_size) {
+static mco_result _mco_makectx(mco_coro* co, _mco_ctxbuf* ctx, void* stack_base,
+size_t stack_size) {
 void** stack_high_ptr = (void**)((size_t)stack_base + stack_size - 16 - 1 * sizeof(size_t));
 stack_high_ptr[0] = (void*)(0xdeaddead); /* Dummy return address. */
 stack_high_ptr[1] = (void*)(co);
@@ -1112,8 +1152,7 @@ typedef struct _mco_ctxbuf {
 void _mco_wrap_main(void);
 int _mco_switch(_mco_ctxbuf* from, _mco_ctxbuf* to);
 
-__asm__(
-".text\n"
+__asm__(".text\n"
 #ifdef __APPLE__
 ".globl __mco_switch\n"
 "__mco_switch:\n"
@@ -1138,8 +1177,7 @@ __asm__(
 #endif
 );
 
-__asm__(
-".text\n"
+__asm__(".text\n"
 #ifdef __APPLE__
 ".globl __mco_wrap_main\n"
 "__mco_wrap_main:\n"
@@ -1158,7 +1196,8 @@ __asm__(
 #endif
 );
 
-static mco_result _mco_makectx(mco_coro* co, _mco_ctxbuf* ctx, void* stack_base, size_t stack_size) {
+static mco_result _mco_makectx(mco_coro* co, _mco_ctxbuf* ctx, void* stack_base,
+size_t stack_size) {
 ctx->d[0] = (void*)(co);
 ctx->d[1] = (void*)(_mco_main);
 ctx->d[2] = (void*)(0xdeaddead); /* Dummy return address. */
@@ -1179,8 +1218,7 @@ typedef struct _mco_ctxbuf {
 void _mco_wrap_main(void);
 int _mco_switch(_mco_ctxbuf* from, _mco_ctxbuf* to);
 
-__asm__(
-".text\n"
+__asm__(".text\n"
 #ifdef __APPLE__
 ".globl __mco_switch\n"
 "__mco_switch:\n"
@@ -1222,8 +1260,7 @@ __asm__(
 #endif
 );
 
-__asm__(
-".text\n"
+__asm__(".text\n"
 #ifdef __APPLE__
 ".globl __mco_wrap_main\n"
 "__mco_wrap_main:\n"
@@ -1241,7 +1278,8 @@ __asm__(
 #endif
 );
 
-static mco_result _mco_makectx(mco_coro* co, _mco_ctxbuf* ctx, void* stack_base, size_t stack_size) {
+static mco_result _mco_makectx(mco_coro* co, _mco_ctxbuf* ctx, void* stack_base,
+size_t stack_size) {
 ctx->x[0] = (void*)(co);
 ctx->x[1] = (void*)(_mco_main);
 ctx->x[2] = (void*)(0xdeaddeaddeaddead); /* Dummy return address. */
@@ -1264,7 +1302,8 @@ typedef ucontext_t _mco_ctxbuf;
 
 #if defined(_LP64) || defined(__LP64__)
 static void _mco_wrap_main(unsigned int lo, unsigned int hi) {
-mco_coro* co = (mco_coro*)(((size_t)lo) | (((size_t)hi) << 32)); /* Extract coroutine pointer. */
+mco_coro* co =
+(mco_coro*)(((size_t)lo) | (((size_t)hi) << 32)); /* Extract coroutine pointer. */
 _mco_main(co);
 }
 #else
@@ -1280,7 +1319,8 @@ static MCO_FORCE_INLINE void _mco_switch(_mco_ctxbuf* from, _mco_ctxbuf* to) {
 MCO_ASSERT(res == 0);
 }
 
-static mco_result _mco_makectx(mco_coro* co, _mco_ctxbuf* ctx, void* stack_base, size_t stack_size) {
+static mco_result _mco_makectx(mco_coro* co, _mco_ctxbuf* ctx, void* stack_base,
+size_t stack_size) {
 /* Initialize ucontext. */
 if (getcontext(ctx) != 0) {
 MCO_LOG("failed to get ucontext");
@@ -1370,8 +1410,7 @@ static void _mco_destroy_context(mco_coro* co) {
 static MCO_FORCE_INLINE void _mco_init_desc_sizes(mco_desc* desc, size_t stack_size) {
 desc->coro_size = _mco_align_forward(sizeof(mco_coro), 16) +
 _mco_align_forward(sizeof(_mco_context), 16) +
-_mco_align_forward(desc->storage_size, 16) +
-stack_size + 16;
+_mco_align_forward(desc->storage_size, 16) + stack_size + 16;
 desc->stack_size = stack_size; /* This is just a hint, it won't be the real one. */
 }
 
@@ -1390,7 +1429,10 @@ typedef struct _mco_context {
 
 static void _mco_jumpin(mco_coro* co) {
 void* cur_fib = GetCurrentFiber();
-if(!cur_fib || cur_fib == (void*)0x1e00) { /* See http://blogs.msdn.com/oldnewthing/archive/2004/12/31/344799.aspx */
+if (!cur_fib ||
+cur_fib ==
+(void*)0x1e00) { /* See http://blogs.msdn.com/oldnewthing/archive/2004/12/31/344799.aspx
+*/
 cur_fib = ConvertThreadToFiber(NULL);
 }
 MCO_ASSERT(cur_fib != NULL);
@@ -1437,7 +1479,8 @@ static mco_result _mco_create_context(mco_coro* co, mco_desc* desc) {
 /* Initialize storage. */
 unsigned char* storage = (unsigned char*)storage_addr;
 /* Create the fiber. */
-_mco_fiber* fib = (_mco_fiber*)CreateFiberEx(desc->stack_size, desc->stack_size, FIBER_FLAG_FLOAT_SWITCH, _mco_wrap_main, co);
+_mco_fiber* fib = (_mco_fiber*)CreateFiberEx(desc->stack_size, desc->stack_size,
+FIBER_FLAG_FLOAT_SWITCH, _mco_wrap_main, co);
 if (!fib) {
 MCO_LOG("failed to create fiber");
 return MCO_MAKE_CONTEXT_ERROR;
@@ -1462,8 +1505,7 @@ static void _mco_destroy_context(mco_coro* co) {
 static MCO_FORCE_INLINE void _mco_init_desc_sizes(mco_desc* desc, size_t stack_size) {
 desc->coro_size = _mco_align_forward(sizeof(mco_coro), 16) +
 _mco_align_forward(sizeof(_mco_context), 16) +
-_mco_align_forward(desc->storage_size, 16) +
-16;
+_mco_align_forward(desc->storage_size, 16) + 16;
 desc->stack_size = stack_size;
 }
 
@@ -1493,7 +1535,8 @@ static void _mco_jumpin(mco_coro* co) {
 emscripten_fiber_t* back_fib = running_fib;
 if (!back_fib) {
 back_fib = &main_fib;
-emscripten_fiber_init_from_current_context(back_fib, main_asyncify_stack, MCO_ASYNCFY_STACK_SIZE);
+emscripten_fiber_init_from_current_context(back_fib, main_asyncify_stack,
+MCO_ASYNCFY_STACK_SIZE);
 }
 running_fib = &context->fib;
 context->back_fib = back_fib;
@@ -1530,7 +1573,8 @@ static mco_result _mco_create_context(mco_coro* co, mco_desc* desc) {
 void* asyncify_stack_base = (void*)asyncify_stack_addr;
 size_t asyncify_stack_size = co_addr + desc->coro_size - asyncify_stack_addr;
 /* Create the fiber. */
-emscripten_fiber_init(&context->fib, _mco_wrap_main, co, stack_base, stack_size, asyncify_stack_base, asyncify_stack_size);
+emscripten_fiber_init(&context->fib, _mco_wrap_main, co, stack_base, stack_size,
+asyncify_stack_base, asyncify_stack_size);
 co->context = context;
 co->stack_base = stack_base;
 co->stack_size = stack_size;
@@ -1545,12 +1589,10 @@ static void _mco_destroy_context(mco_coro* co) {
 }
 
 static MCO_FORCE_INLINE void _mco_init_desc_sizes(mco_desc* desc, size_t stack_size) {
-desc->coro_size = _mco_align_forward(sizeof(mco_coro), 16) +
-_mco_align_forward(sizeof(_mco_context), 16) +
-_mco_align_forward(desc->storage_size, 16) +
-_mco_align_forward(stack_size, 16) +
-_mco_align_forward(MCO_ASYNCFY_STACK_SIZE, 16) +
-16;
+desc->coro_size =
+_mco_align_forward(sizeof(mco_coro), 16) + _mco_align_forward(sizeof(_mco_context), 16) +
+_mco_align_forward(desc->storage_size, 16) + _mco_align_forward(stack_size, 16) +
+_mco_align_forward(MCO_ASYNCFY_STACK_SIZE, 16) + 16;
 desc->stack_size = stack_size; /* This is just a hint, it won't be the real one. */
 }
 
@@ -1576,9 +1618,11 @@ typedef struct _mco_context {
 _asyncify_stack_region stack_region;
 } _mco_context;
 
-__attribute__((import_module("asyncify"), import_name("start_unwind"))) void _asyncify_start_unwind(void*);
+__attribute__((import_module("asyncify"), import_name("start_unwind"))) void _asyncify_start_unwind(
+void*);
 __attribute__((import_module("asyncify"), import_name("stop_unwind"))) void _asyncify_stop_unwind();
-__attribute__((import_module("asyncify"), import_name("start_rewind"))) void _asyncify_start_rewind(void*);
+__attribute__((import_module("asyncify"), import_name("start_rewind"))) void _asyncify_start_rewind(
+void*);
 __attribute__((import_module("asyncify"), import_name("stop_rewind"))) void _asyncify_stop_rewind();
 
 MCO_NO_INLINE void _mco_jumpin(mco_coro* co) {
@@ -1594,7 +1638,8 @@ MCO_NO_INLINE void _mco_jumpin(mco_coro* co) {
 static MCO_NO_INLINE void _mco_finish_jumpout(mco_coro* co, volatile int rewind_id) {
 _mco_context* context = (_mco_context*)co->context;
 int next_rewind_id = context->rewind_id + 1;
-if(rewind_id == next_rewind_id) { /* Begins unwinding the stack (save locals and call stack to rewind later) */
+if (rewind_id == next_rewind_id) { /* Begins unwinding the stack (save locals and call stack to
+rewind later) */
 _mco_prepare_jumpout(co);
 context->rewind_id = next_rewind_id;
 _asyncify_start_unwind(&context->stack_region);
@@ -1645,11 +1690,9 @@ static void _mco_destroy_context(mco_coro* co) {
 }
 
 static MCO_FORCE_INLINE void _mco_init_desc_sizes(mco_desc* desc, size_t stack_size) {
-desc->coro_size = _mco_align_forward(sizeof(mco_coro), 16) +
-_mco_align_forward(sizeof(_mco_context), 16) +
-_mco_align_forward(desc->storage_size, 16) +
-_mco_align_forward(stack_size, 16) +
-16;
+desc->coro_size =
+_mco_align_forward(sizeof(mco_coro), 16) + _mco_align_forward(sizeof(_mco_context), 16) +
+_mco_align_forward(desc->storage_size, 16) + _mco_align_forward(stack_size, 16) + 16;
 desc->stack_size = stack_size; /* This is just a hint, it won't be the real one. */
 }
 
@@ -1823,7 +1866,8 @@ mco_result mco_yield(mco_coro* co) {
 size_t stack_addr = (size_t)&dummy;
 size_t stack_min = (size_t)co->stack_base;
 size_t stack_max = stack_min + co->stack_size;
-if(co->magic_number != MCO_MAGIC_NUMBER || stack_addr < stack_min || stack_addr > stack_max) { /* Stack overflow. */
+if (co->magic_number != MCO_MAGIC_NUMBER || stack_addr < stack_min ||
+stack_addr > stack_max) { /* Stack overflow. */
 MCO_LOG("coroutine stack overflow, try increasing the stack size");
 return MCO_STACK_OVERFLOW;
 }