diff --git a/Cargo.lock b/Cargo.lock
index 47203b1..f439e76 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2,6 +2,12 @@
# It is not intended for manual editing.
version = 3
+[[package]]
+name = "cfg-if"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
+
[[package]]
name = "freezable"
version = "0.1.0"
@@ -16,6 +22,29 @@ dependencies = [
"syn",
]
+[[package]]
+name = "getrandom"
+version = "0.2.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4eb1a864a501629691edf6c15a593b7a51eebaa1e8468e9ddc623de7c9b58ec6"
+dependencies = [
+ "cfg-if",
+ "libc",
+ "wasi",
+]
+
+[[package]]
+name = "libc"
+version = "0.2.132"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8371e4e5341c3a96db127eb2465ac681ced4c433e01dd0e938adbef26ba93ba5"
+
+[[package]]
+name = "ppv-lite86"
+version = "0.2.16"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "eb9f9e6e233e5c4a35559a617bf40a4ec447db2e84c20b55a6f83167b7e57872"
+
[[package]]
name = "proc-macro2"
version = "1.0.43"
@@ -34,6 +63,45 @@ dependencies = [
"proc-macro2",
]
+[[package]]
+name = "rand"
+version = "0.8.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404"
+dependencies = [
+ "libc",
+ "rand_chacha",
+ "rand_core",
+]
+
+[[package]]
+name = "rand_chacha"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88"
+dependencies = [
+ "ppv-lite86",
+ "rand_core",
+]
+
+[[package]]
+name = "rand_core"
+version = "0.6.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7"
+dependencies = [
+ "getrandom",
+]
+
+[[package]]
+name = "runtime"
+version = "0.1.0"
+dependencies = [
+ "freezable",
+ "freezable-macro",
+ "rand",
+]
+
[[package]]
name = "syn"
version = "1.0.99"
@@ -50,3 +118,9 @@ name = "unicode-ident"
version = "1.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c4f5b37a154999a8f3f98cc23a628d850e154479cd94decf3414696e12e31aaf"
+
+[[package]]
+name = "wasi"
+version = "0.11.0+wasi-snapshot-preview1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
diff --git a/Cargo.toml b/Cargo.toml
index ad2ed0f..d4cf8ae 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -2,6 +2,7 @@
resolver = "2"
members = [
"freezable",
- "freezable-macro"
+ "freezable-macro",
+ "runtime"
]
diff --git a/README.md b/README.md
index 86a72c5..65a7ded 100644
--- a/README.md
+++ b/README.md
@@ -4,15 +4,101 @@ This repository is for revealing the magic of the `async` functions.
It tries to uncover every single secret of the `async` concept.
If you are thinking that:
-***There are already some async libraries out there, that is working pretty well. What is the purpose of this one?***
+***There are already some async libraries out there, that is working pretty well.***
+***What is the purpose of this one?***
The answer is: this library is not for re-implementing the async world. It is to show what is going on
behind the scenes in the simplest way possible. This project won't be a 3rd party library. It is a working
-demo of the async world, including all the concepts. Since the goal is to demystify all the concepts
+demo of the async world. Since the goal is to demystify all the concepts
related to the async world, we won't be using any 3rd party library, and also nothing magical from the
-standart library. There won't be any `tokio`, `mio`, `future`, `async`...
+standard library. There won't be any `tokio`, `mio`, `future`, `async`...
We are only allowed to use basic synchronous code in this project.
+
+## Sneak peek at the result we are achieving with this library:
+Click to expand!
+
+The below example is a good demonstration of how we were able to run interruptible functions in an asynchronous
+context, and did it from scratch (without using any `async`, `yield`, `future`, or any other magical thing)!
+
+You can supply your own function (which will be turned into its async version via `freezable-macro`),
+and use it in the main example if you want!
+
+Example output:
+```
+--------------
+STATE OF THE TASK #0: frozen in state: 1, with value: 10
+STATE OF THE TASK #1: frozen in state: 1, with value: 20
+STATE OF THE TASK #2: frozen in state: 1, with value: 30
+---------
+for the task #0, requesting the I/O resource: 8
+for the task #1, requesting the I/O resource: 41
+for the task #2, requesting the I/O resource: 70
+---------
+The I/O resource: 41, is now ready!
+Calling unfreeze on task #1
+STATE OF THE TASK #1: frozen in state: 2, with value: 21
+---------
+for the task #1, requesting the I/O resource: 74
+---------
+The I/O resource: 8, is now ready!
+Calling unfreeze on task #0
+STATE OF THE TASK #0: frozen in state: 2, with value: 11
+---------
+for the task #0, requesting the I/O resource: 26
+---------
+The I/O resource: 26, is now ready!
+Calling unfreeze on task #0
+STATE OF THE TASK #0: frozen in state: 3, with value: 12
+---------
+for the task #0, requesting the I/O resource: 124
+---------
+The I/O resource: 74, is now ready!
+Calling unfreeze on task #1
+STATE OF THE TASK #1: frozen in state: 3, with value: 22
+---------
+for the task #1, requesting the I/O resource: 74
+---------
+The I/O resource: 70, is now ready!
+Calling unfreeze on task #2
+STATE OF THE TASK #2: frozen in state: 2, with value: 31
+---------
+for the task #2, requesting the I/O resource: 101
+---------
+The I/O resource: 124, is now ready!
+Calling unfreeze on task #0
+STATE OF THE TASK #0: Finished!
+---------
+---------
+The I/O resource: 74, is now ready!
+Calling unfreeze on task #1
+STATE OF THE TASK #1: Finished!
+---------
+---------
+The I/O resource: 101, is now ready!
+Calling unfreeze on task #2
+STATE OF THE TASK #2: frozen in state: 3, with value: 32
+---------
+for the task #2, requesting the I/O resource: 144
+---------
+The I/O resource: 144, is now ready!
+Calling unfreeze on task #2
+STATE OF THE TASK #2: Finished!
+---------
+---------
+all tasks are finished!
+```
+
+**in the above example:**
+
+*being awaited I/O resource queue: [8, 41, 70, 74, 26, 124, 74, 101, 144]*
+
+*became ready I/O resource queue: [41, 8, 26, 74, 70, 124, 74, 101, 144]*
+
+
+
+---
+
I prepared a short summary of what is happening when you use the `async` keyword, and how your function is
actually put to sleep and is awaken again. Considering that I've watched 15+ hours of video, and read countless pages
of articles about it, here is a very short summary (believe me, it is the shortest you can
@@ -26,9 +112,9 @@ find that will answer your questions about async) of what is actually happening,
### Async
-***The Main problem:*** *we have some I/O related tasks in our hand (network, file read, etc.).**
-**And we don’t want our code to sit idly while waiting for this I/O task to finish.**
-**It should be able to continue doing some other work while waiting for the result of the I/O task at hand.**
+**The Main problem:** *we have some I/O related tasks in our hand (network, file read, etc.).*
+*And we don’t want our code to sit idly while waiting for this I/O task to finish.*
+*It should be able to continue doing some other work while waiting for the result of the I/O task at hand.*
There are 3 approaches for that (Carl Fredrik Samson explained it better than I can, so quoting from his [book](https://cfsamson.github.io/book-exploring-async-basics/5_strategies_for_handling_io.html)):
@@ -141,14 +227,14 @@ is not ready. So, in the end, we will only stop for `leaf-futures`, or I/O resou
- The network card has a micro-controller in it, so it probably does some polling to check if there
is any answer present from the server. When there is, it notifies the OS. And then OS interrupts the
CPU with a message: “this resource is now ready”.
- - If you can imagine, this whole OS part is another rabbit hole. If we want to implement our own
+ - As you probably guessed already, this OS part is another rabbit hole. If we want to implement our own
functions that can communicate with OS, we will have to dive into specific signal/flags that each
OS use.
- On top of that, each operating system has a different strategy for this notification system
(for example: Epoll for linux, Kqueue for BSD(macOS), and IOCP for Windows). Each will require a
different implementation on our side.
- - This is all too low-level, and implementing this is whole another project
- (`[mio](https://github.com/tokio-rs/mio)`). I don’t think doing that will help demystify
+ - This is all too low-level, and implementing this would be a whole another project
+ ([`mio`](https://github.com/tokio-rs/mio)). I don’t think doing that will help demystify
the `async` concept, and we will be diverging from our main point. If you insist on learning
the details of OS communication, read the above link and all the OS related chapters in there,
and then you might dive into `mio` project.
@@ -162,12 +248,11 @@ is not ready. So, in the end, we will only stop for `leaf-futures`, or I/O resou
can stop a function, and with a signal from the OS, we can continue on this function. So we can do
this instead:
- spawn a thread, that will be the simulation of the OS
- - send our task’s ID (in place of some I/O resource subscription) to this thread (the OS)
- - and the OS will just wait for some random time, and then notify us that the requested resource
- (ID for our case) is ready.
+    - send the subscribed I/O resource ID (in place of some I/O resource subscription) to this thread (the OS)
+ - and the OS will just wait for some random time, and then notify us that the requested resource is ready.
1. We covered the communication part with the OS. But how will the rest work? Who will awake (`unfreeze`)
the tasks? One way to implement this is:
- - have a thread (executor) that will run the these async tasks. Btw, this does not have to be an
+    - have a thread (`executor`) that will run these async tasks. Btw, this does not have to be an
extra thread, it can be the main thread as well
- and have another thread that will listen to the signals sent by the OS, and somehow notify the
executor about this, so that executor will know which task is available for further progress
@@ -213,8 +298,7 @@ the tasks? One way to implement this is:
-We won't be diving into assembly or machine code, but we will not use any 3rd party library,
-and implement every magical thing from scratch, including:
+We will not use any 3rd party library, and implement every magical thing from scratch, including:
- the types/traits:
- `Futures`,
- `Generators`,
@@ -232,24 +316,23 @@ and implement every magical thing from scratch, including:
## The Roadmap:
1. Implement `Freezable`:
- 1. The most important ability of the `async` world is: being able to stop a function, and continue from where we left off.
+ - The most important ability of the `async` world is: being able to stop a function, and continue from where we left off.
Async functions are able to do this, via `Generators`. And `Generators` are able to do this via the `yield` keyword.
So we should implement a library, that will demonstrate how we can `freeze` a function,
and continue to it when we feel like it.
- 1. We will see that, we can write a `freezable` function. But in order to make another person's function turn into a
+ - We will see that, we can write a `freezable` function. But in order to make another person's function turn into a
`freezable` function, we will have to write a code, that should write a code for us. So, the next step is:
*write a macro, that will turn the given function into a `freezable` function*.
2. Implement a `Reactor` and an `Executor`
- 1. Now, our `freezable` functions are ready to use, but in the async world,
+ - Now, our `freezable` functions are ready to use, but in the async world,
we are not continuing our functions when we please to.
They are being *frozen* when the IO resource is not ready, and getting *awaken* when that resource is ready.
+    - simulate the OS with an external thread, since dealing with OS's and IO's is a whole other project
+ - simulate the OS with an external thread, since dealing with OS's and IO's are whole another project
on its own, [`mio`](https://github.com/tokio-rs/mio) (details are discussed below).
- 1. implement a `Reactor` that will listen to the signals of the OS, and can relay the information
+ - implement a `Reactor` that will listen to the signals of the OS, and can relay the information
to the `Executor` about which task should be polled next.
- 1. write another macro, that can poll, and continue on many `freezable` tasks in a single thread.
- This will be our `Executor`, which will try to call `unfreeze()` on the the tasks that are told
- by the `Reactor`
+    - implement an `Executor` which will try to call `unfreeze()` on the tasks that are told by the
+ `Reactor`, and continue on many `freezable` tasks in a single thread.
diff --git a/freezable-macro/src/bin/main.rs b/freezable-macro/src/bin/freezable-macro/main.rs
similarity index 100%
rename from freezable-macro/src/bin/main.rs
rename to freezable-macro/src/bin/freezable-macro/main.rs
diff --git a/runtime/Cargo.toml b/runtime/Cargo.toml
new file mode 100644
index 0000000..2ab810f
--- /dev/null
+++ b/runtime/Cargo.toml
@@ -0,0 +1,14 @@
+[package]
+name = "runtime"
+version = "0.1.0"
+edition = "2021"
+
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[dependencies]
+freezable = { version = "0.1.0", path = "../freezable"}
+freezable-macro = { version = "0.1.0", path = "../freezable-macro"}
+rand = "0.8.5"
+
+[features]
+printable_states = []
diff --git a/runtime/README.md b/runtime/README.md
new file mode 100644
index 0000000..5fabe8c
--- /dev/null
+++ b/runtime/README.md
@@ -0,0 +1,178 @@
+# Introduction
+`freezable` functions can be used as `generators`, `iterators`, or `async` functions.
+In this example, I wanted to demonstrate how to use `freezable` functions as `async` functions, and run
+several of them asynchronously.
+
+# The Binary
+## How to run the binary
+
+- To run the application and see the whole output: `cargo run --bin runtime --features=printable_states`
+- If you do not want to print the states of the tasks for each checkpoint,
+run with: `cargo run --bin runtime`
+
+Example output:
+```
+Here we go!
+--------------
+STATE OF THE TASK #0: frozen in state: 1, with value: 10
+STATE OF THE TASK #1: frozen in state: 1, with value: 20
+STATE OF THE TASK #2: frozen in state: 1, with value: 30
+---------
+for the task #0, requesting the I/O resource: 8
+for the task #1, requesting the I/O resource: 41
+for the task #2, requesting the I/O resource: 70
+---------
+The I/O resource: 41, is now ready!
+Calling unfreeze on task #1
+STATE OF THE TASK #1: frozen in state: 2, with value: 21
+---------
+for the task #1, requesting the I/O resource: 74
+---------
+The I/O resource: 8, is now ready!
+Calling unfreeze on task #0
+STATE OF THE TASK #0: frozen in state: 2, with value: 11
+---------
+for the task #0, requesting the I/O resource: 26
+---------
+The I/O resource: 26, is now ready!
+Calling unfreeze on task #0
+STATE OF THE TASK #0: frozen in state: 3, with value: 12
+---------
+for the task #0, requesting the I/O resource: 124
+---------
+The I/O resource: 74, is now ready!
+Calling unfreeze on task #1
+STATE OF THE TASK #1: frozen in state: 3, with value: 22
+---------
+for the task #1, requesting the I/O resource: 74
+---------
+The I/O resource: 70, is now ready!
+Calling unfreeze on task #2
+STATE OF THE TASK #2: frozen in state: 2, with value: 31
+---------
+for the task #2, requesting the I/O resource: 101
+---------
+The I/O resource: 124, is now ready!
+Calling unfreeze on task #0
+STATE OF THE TASK #0: Finished!
+---------
+---------
+The I/O resource: 74, is now ready!
+Calling unfreeze on task #1
+STATE OF THE TASK #1: Finished!
+---------
+---------
+The I/O resource: 101, is now ready!
+Calling unfreeze on task #2
+STATE OF THE TASK #2: frozen in state: 3, with value: 32
+---------
+for the task #2, requesting the I/O resource: 144
+---------
+The I/O resource: 144, is now ready!
+Calling unfreeze on task #2
+STATE OF THE TASK #2: Finished!
+---------
+---------
+all tasks are finished!
+```
+
+**in the above example:**
+
+*being awaited I/O resource queue: [8, 41, 70, 74, 26, 124, 74, 101, 144]*
+
+*became ready I/O resource queue: [41, 8, 26, 74, 70, 124, 74, 101, 144]*
+
+This example is a good demonstration of how we were able to run interruptible functions in an asynchronous
+context, and did it from scratch (without using any `async`, `yield`, or `future`)!
+
+
+# The Library
+
+This library provides 3 things:
+- os-simulation
+- reactor
+- executor
+
+## OS-Simulation
+**why simulate, but not directly implement the communication with OS (a.k.a `mio`)?**
+
+Recall that this project's main goal is to demystify async concepts and demonstrate that functions can be
+interrupted and can be continued on again from where they left off. Interacting with the OS
+(Epoll for linux, Kqueue for BSD(macOS), and IOCP for Windows) is a rabbit hole on its own as mentioned
+in several other places in this project, and implementing this will make this project much more complicated
+than it already is. On top of that, interaction with the OS part is not directly helping us to
+uncover the secrets of interruptible functions.
+
+**how to think of the OS as a black-box for this project?**
+
+If we look at the OS from the perspective of the async world, it is just a message relayer that tells us
+some I/O resource we are awaiting on has become ready. In other words: we will subscribe to an event
+on the OS side, then wait for some unknown time, then the OS will somehow notify us that the subscribed event
+is ready now.
+
+So, we can simulate the OS as the following:
+1. we will spawn a thread (this will be `OS`)
+2. we will be able to send some integers to this thread. These integers will represent the ID of the resource
+3. our thread (`OS`) will wait for some random time for each resource_id (the integer we sent to it).
+This random wait will simulate the resource becoming ready.
+4. then this thread (`OS`) will notify `the Reactor` when the subscribed resource_id has become ready.
+
+## The Reactor and The Executor
+
+The reactor gets the events that we are awaiting on from the `Executor`, and subscribes to necessary
+I/O events on the OS side. When the OS notifies the `Reactor` on some events/resource being ready,
+the Reactor then notifies the `Executor` about these.
+
+In practice, when the `Reactor` receives a notification from the OS about a resource being ready,
+it will then call the `Waker.wake()` on the waker objects of the relevant tasks (the tasks that are waiting
+for that resource), making them ready to be polled for the `Executor`
+(refer to the root README for further details).
+
+Again, to keep this project as simple as possible, I choose to abstract away this `Waker` concept as well.
+In practice, these `Waker` objects are created/initialized by the `Executor`, and the `Reactor` has access
+to these `Waker` objects, so it can call the `Waker.wake()` method on them when a relevant notification
+arrives from the `OS`. The information of the relevant I/O resource is stored in the `Waker` object,
+so `Reactor` can know which `Waker` to call when a notification arrives (this is an oversimplification).
+
+In summary, the `Executor` initializes the `Waker`, which stores which I/O resource is related to which task.
+Then, the `Reactor` calls `Waker.wake()` when it receives a notification from the OS.
+
+To make things simpler, here is what we are going to do:
+1. The `Executor` will create mapping between the tasks and the awaited I/O resource for these tasks.
+Then send the ID of the necessary I/O resource to the `Reactor`. This makes sense, since `Executor`
+was responsible from initializing the `Waker` object.
+2. Then, the `Reactor` will listen to the notifications from the OS, and then notify
+the `Executor` about these. This also makes sense, since the `Reactor` should be the one listening to the `OS`.
+3. Then, the `Executor` will use the mapping between I/O resources and tasks, and call `unfreeze()` on the
+relevant tasks.
+
+*note: in practice, `Reactor` is awaking the tasks using `Waker.wake()`. Then the executor polls the*
+*awakened tasks. In this example, however, there are no waking mechanisms. The executor will know which*
+*task to poll directly.*
+
+*another note: you may realize that, `Reactor` is not that useful in our scenario. The `Executor` could*
+*have directly subscribed to the `OS`, and eliminate the `Reactor` from the equation. In fact, this*
+*is another strategy in practice too. The reason that I included a `Reactor` is: it shall be easier to*
+*understand more complex mechanisms where `Reactor` is included after understanding this example.*
+
+
+### Caveat about the `Executor`
+The tasks given to the `Executor` needs to be the same type. This restriction can be eliminated
+with `Vec<Box<dyn Freezable>>`. But I did not like the idea of storing all the async tasks in the heap
+for this project. If we are to utilize the heap for massive memory allocation, we could as well store the
+necessary information related to each state in the heap, and have a much simpler state machine structure
+as well.
+
+Not using the heap is also good for the embedded systems (some machines do not have heap, or very
+limited heap). I want this example to be as simple as possible. So that it would serve as a stepping stone
+for what can be further done.
+
+Another strategy for eliminating the requirement of the tasks being the same type, would be using
+macros (take a look at `select` from tokio: https://docs.rs/tokio/latest/tokio/macro.select.html).
+
+*Note that macros are utilized in practice (in tokio), instead of heaps :)*
+
+The same structure you see in the link can be easily applied to our `Executor` as well,
+but again for simplicity, I've chosen to keep it as is. In the end, the `Executor`'s aim is to prove that
+concurrently running some interruptible tasks in a single thread is possible. And it is accomplishing
+this goal :)
diff --git a/runtime/src/bin/runtime/main.rs b/runtime/src/bin/runtime/main.rs
new file mode 100644
index 0000000..f734e80
--- /dev/null
+++ b/runtime/src/bin/runtime/main.rs
@@ -0,0 +1,79 @@
+use runtime::runtime;
+
+use freezable::{Freezable, FreezableGenerator4};
+
+#[cfg(not(feature = "printable_states"))]
+use freezable::{FreezableError, FreezableState};
+#[cfg(not(feature = "printable_states"))]
+use freezable_macro::freezable;
+
+// LOOK BELOW (main_custom function) FOR THE CUSTOM EXAMPLE THAT YOU CAN USE YOUR OWN FUNCTION!
+
+fn main() {
+ println!(
+ "Did you know you can easily create own `freezable` functions via our macro?
+ It's only 5 cents per function!
+ Joking, just look below on how to do it, I prepared a template for you :)"
+ );
+ println!("Initializing 3 async tasks...");
+ println!("These 3 async tasks will produce 4 values each.");
+ println!("The first one will start from 10, and produce 11, 12, 13");
+ println!("The second one will start from 20, and produce 21, 22, 23");
+ println!("And the third one will start from 30, and produce 31, 32, 33");
+ println!(
+ "\n You can track the progress of the tasks easily,
+ for example: if the value at the current state is 12,
+ that means the 1st task is on the 2nd state"
+ );
+ println!(
+ "\nOne can imagine all these tasks will retrieve all these values from a website,
+ or reading them from a file, so depending on the I/O resource being ready,
+ the relevant task will progress to completion."
+ );
+ println!(
+ "\nThis binary is demonstrating that we are able to stop and continue on the tasks,
+ with a single thread"
+ );
+ println!("\nHere we go!");
+ println!("--------------");
+
+ let async_task1 = FreezableGenerator4::start(10);
+ let async_task2 = FreezableGenerator4::start(20);
+ let async_task3 = FreezableGenerator4::start(30);
+ let mut tasks = vec![async_task1, async_task2, async_task3];
+
+ runtime(&mut tasks);
+
+ assert!(tasks.iter().all(|task| task.is_finished()));
+}
+
+// if you want to use your custom function, use this function instead of the actual `main`
+#[cfg(not(feature = "printable_states"))]
+#[allow(dead_code)]
+fn main_custom() {
+ println!("Running your custom functions!");
+ println!("--------------");
+
+ #[freezable]
+ // simply, change the function `freezable_generator_4` below with your own function
+ fn freezable_generator_4(begin: u8) -> u8 {
+ let mut next: u8 = begin;
+ freeze!(next); // freezes the function, but also return the partial result
+ next += 1;
+ freeze!(next);
+ next += 1;
+ freeze!(next);
+ next += 1;
+ next
+ }
+
+    // change the `freezable_generator_4` names below with your function's name
+ let async_task1 = freezable_generator_4::start(10);
+ let async_task2 = freezable_generator_4::start(20);
+ let async_task3 = freezable_generator_4::start(30);
+ let mut tasks = vec![async_task1, async_task2, async_task3];
+
+ runtime(&mut tasks);
+
+ assert!(tasks.iter().all(|task| task.is_finished()));
+}
diff --git a/runtime/src/executor.rs b/runtime/src/executor.rs
new file mode 100644
index 0000000..2d89143
--- /dev/null
+++ b/runtime/src/executor.rs
@@ -0,0 +1,129 @@
+#[cfg(feature = "printable_states")]
+use freezable::FreezableGenerator4;
+
+use freezable::Freezable;
+use rand::Rng;
+use std::collections::HashMap;
+use std::sync::mpsc::{Receiver, Sender};
+
+/// Simulates the `Executor`
+///
+/// Calls `unfreeze()` on the tasks when they first arrive,
+/// then puts them in a queue, and awaits on the relevant I/O resources to become ready.
+/// When these resources are ready, calls `unfreeze` on the relevant tasks
+///
+/// You can compare this Executor to the `select` of `tokio` or `futures` if you like.
+/// This Executor's aim is to prove that concurrently running some interruptible tasks
+/// in a single thread is possible.
+///
+/// Assumes the current states of the tasks given to this Executor are not:
+/// `Finished` or `Cancelled`
+///
+/// This executor does not do any error handling for simplicity. It just ignores the errors.
+pub fn start_executor(
+ #[cfg(not(feature = "printable_states"))] tasks: &mut [impl Freezable],
+ #[cfg(feature = "printable_states")] tasks: &mut [FreezableGenerator4],
+    event_sender: Sender<u8>,
+    awake_signal_recv: Receiver<u8>,
+) {
+ let mut rng = rand::thread_rng();
+    let mut task_event_map: HashMap<usize, u8> = HashMap::new();
+
+ #[cfg(not(feature = "printable_states"))]
+ {
+ // call the first unfreeze() on all the tasks
+ tasks.iter_mut().for_each(|task| {
+ let _ = task.unfreeze();
+ });
+ }
+
+ #[cfg(feature = "printable_states")]
+ {
+ // call the first unfreeze() on all the tasks
+ tasks.iter_mut().enumerate().for_each(|(id, task)| {
+ let _ = task.unfreeze();
+ print!("STATE OF THE TASK #{id}: ");
+ print_state(task);
+ });
+ }
+
+ println!("---------");
+
+ // for each non finished task, identify the event (I/O resource)
+ // and send it to the reactor
+ tasks.iter().enumerate().for_each(|(task_id, task)| {
+ if !task.is_finished() {
+ let event_id: u8 = rng.gen();
+ println!("for the task #{task_id}, requesting the I/O resource: {event_id}");
+ task_event_map.insert(task_id, event_id);
+ event_sender
+ .send(event_id)
+ .expect("Reactor should be listening");
+ }
+ });
+
+ println!("---------");
+
+ loop {
+ // the executor has nothing to do but to execute tasks
+ // in order to execute tasks, we need to know the corresponding I/O
+ // resources are not ready, so we better wait till we hear a message
+ // from the `Reactor`
+ if let Ok(resource_id) = awake_signal_recv.recv() {
+ println!("The I/O resource: {resource_id}, is now ready!");
+
+ // for each task that relies on the resource that is ready now,
+ // call `unfreeze()` on them
+ let mut progressing_tasks = vec![];
+ task_event_map.iter().for_each(|(&task_id, &event_id)| {
+ if resource_id == event_id {
+ println!("Calling unfreeze on task #{task_id}");
+ let _ = tasks[task_id].unfreeze();
+ #[cfg(feature = "printable_states")]
+ {
+ print!("STATE OF THE TASK #{task_id}: ");
+ print_state(&tasks[task_id]);
+ }
+ progressing_tasks.push(task_id);
+ }
+ });
+
+ println!("---------");
+
+ // if the task is still not finished after being progressed,
+ // send another event_id to the `Reactor`, so that it can notify us later
+ progressing_tasks.iter().for_each(|&task_id| {
+ if !tasks[task_id].is_finished() {
+ let event_id: u8 = rng.gen();
+ println!("for the task #{task_id}, requesting the I/O resource: {event_id}");
+ task_event_map.insert(task_id, event_id);
+ event_sender
+ .send(event_id)
+ .expect("Reactor should be listening");
+ }
+ });
+
+ println!("---------");
+
+ // if all the tasks are finished :)
+ if tasks.iter().all(|task| task.is_finished()) {
+ println!("all tasks are finished!");
+ break;
+ }
+ } else {
+ panic!("Reactor closed the sender!");
+ }
+ }
+}
+
+#[cfg(feature = "printable_states")]
+fn print_state(task: &FreezableGenerator4) {
+ match task {
+ FreezableGenerator4::Chunk0(val) => println!("frozen in state: 0, with value: {val}"),
+ FreezableGenerator4::Chunk1(val) => println!("frozen in state: 1, with value: {val}"),
+ FreezableGenerator4::Chunk2(val) => println!("frozen in state: 2, with value: {val}"),
+ FreezableGenerator4::Chunk3(val) => println!("frozen in state: 3, with value: {val}"),
+ FreezableGenerator4::Finished => println!("Finished!"),
+ FreezableGenerator4::Cancelled => println!("Cancelled"),
+ }
+}
diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs
new file mode 100644
index 0000000..8be16bb
--- /dev/null
+++ b/runtime/src/lib.rs
@@ -0,0 +1,44 @@
+mod executor;
+mod os_simulation;
+mod reactor;
+
+pub use executor::start_executor;
+pub use os_simulation::simulate_os;
+pub use reactor::start_reactor;
+
+#[cfg(feature = "printable_states")]
+use freezable::FreezableGenerator4;
+
+use freezable::Freezable;
+use std::sync::mpsc;
+use std::thread;
+
+/// Runs the given `freezable` tasks to completion asynchronously
+///
+/// Uses the `Executor`, `Reactor`, and the `simulate_os` for that
+///
+/// you can create your custom `freezable` tasks via using the `freezable-macro`,
+/// and supply them to `runtime` via the `tasks` argument, and have the most fun time of your life!
+pub fn runtime(
+ #[cfg(not(feature = "printable_states"))] tasks: &mut [impl Freezable],
+ #[cfg(feature = "printable_states")] tasks: &mut [FreezableGenerator4],
+) {
+ let (subscription_sender, subscription_recv) = mpsc::channel();
+ let (notification_sender, notification_recv) = mpsc::channel();
+ let (event_sender, event_recv) = mpsc::channel();
+ let (awake_signal_sender, awake_signal_recv) = mpsc::channel();
+
+ thread::spawn(|| simulate_os(notification_sender, subscription_recv));
+ thread::spawn(|| {
+ start_reactor(
+ event_recv,
+ awake_signal_sender,
+ subscription_sender,
+ notification_recv,
+ )
+ });
+
+ start_executor(tasks, event_sender, awake_signal_recv);
+
+ assert!(tasks.iter().all(|task| task.is_finished()));
+}
diff --git a/runtime/src/os_simulation.rs b/runtime/src/os_simulation.rs
new file mode 100644
index 0000000..3b3b5d5
--- /dev/null
+++ b/runtime/src/os_simulation.rs
@@ -0,0 +1,44 @@
+use rand::Rng;
+use std::collections::HashMap;
+use std::sync::mpsc::{Receiver, Sender};
+use std::{thread, time};
+
+/// Simulates the os via waiting for a random amount of time for the requested resource,
+/// then notifies the subscriber on the awaited resource.
+pub fn simulate_os(notification_sender: Sender<u8>, subscription_recv: Receiver<u8>) {
+ let mut rng = rand::thread_rng();
+ let mut current_turn = 0;
+ let mut resource_map = HashMap::<u8, u8>::new();
+
+ loop {
+ // try to receive message immediately without blocking (try_recv)
+ // because there may not be any message, and the OS needs to keep track of the
+ // events in the meantime. So it will quickly listen for a new subscription,
+ // and move on to its own things, the new subscription request will be handled in the next turn
+ // that's why, keep the loop short
+ if let Ok(resource_id) = subscription_recv.try_recv() {
+ // if resource id is new
+ resource_map.entry(resource_id).or_insert_with(|| {
+ let turns: u8 = rng.gen_range(1..10); // how many turns (100 milliseconds) does this resource take to be ready
+ current_turn + turns
+ });
+ }
+ // for each resource that is ready, send the notification
+ let mut to_be_removed: Vec<u8> = vec![];
+ resource_map.iter().for_each(|(&resource, &turn)| {
+ if current_turn == turn {
+ to_be_removed.push(resource)
+ }
+ });
+ // can't mutate the resource map in the above iteration, hence this is necessary
+ to_be_removed.iter().for_each(|&resource| {
+ resource_map.remove(&resource);
+ if notification_sender.send(resource).is_err() {
+ panic!("The Reactor is not listening");
+ }
+ });
+
+ thread::sleep(time::Duration::from_millis(100)); // ZA WARUDO
+ current_turn += 1;
+ }
+}
diff --git a/runtime/src/reactor.rs b/runtime/src/reactor.rs
new file mode 100644
index 0000000..1a9b618
--- /dev/null
+++ b/runtime/src/reactor.rs
@@ -0,0 +1,58 @@
+use std::collections::HashSet;
+use std::sync::mpsc::{Receiver, Sender};
+use std::{thread, time};
+
+/// Simulates the `Reactor`
+///
+/// Gets the events that we are awaiting on from the `Executor`, and subscribes to necessary
+/// I/O events on the OS side. When the OS notifies the `Reactor` on some events/resource being ready,
+/// the Reactor then notifies the `Executor` about these.
+///
+/// `event` and `resource` corresponds to the same thing,
+/// but we will use `event` for the communication between `executor` <-> `reactor`
+/// and `resource` between `reactor` <-> `OS`.
+///
+/// In this example, the `Reactor` may not make much sense because of all the simplifications we made
+/// the `Executor` could do the job of `Reactor` as well. So why did I put `Reactor` in here?
+/// Because in practice, the `Reactor` is very useful (unlike this example). And I believe this example
+/// will make it much more easier to understand the real `Reactor`.
+///
+/// In practice, the `Reactor` will listen to the OS for the notifications,
+/// and then call the `Waker.wake()` on the relevant tasks, making them ready to be polled for
+/// the `Executor`. Refer to the root README for further details
+
+pub fn start_reactor(
+ event_recv: Receiver<u8>,
+ awake_signal_sender: Sender<u8>,
+ subscription_sender: Sender<u8>,
+ notification_recv: Receiver<u8>,
+) {
+ let mut events = HashSet::new();
+
+ loop {
+ // try to receive the interested events (the events that executor is waiting on)
+ // immediately without blocking (try_recv), because we still need to continuously
+ // listen to the notifications from the OS
+ if let Ok(event_id) = event_recv.try_recv() {
+ // if event id is new
+ if !events.contains(&event_id) {
+ events.insert(event_id);
+ subscription_sender
+ .send(event_id)
+ .expect("OS should always be listening");
+ }
+ // else is omitted, since more than 1 task may be related to the same event, it is ok
+ }
+
+ if let Ok(resource_id) = notification_recv.try_recv() {
+ if !events.remove(&resource_id) {
+ panic!("the completed resource is not in the Reactor's list!");
+ }
+ awake_signal_sender
+ .send(resource_id)
+ .expect("Executor should be always listening");
+ }
+
+ thread::sleep(time::Duration::from_millis(10)); // ZA WARUDO
+ }
+}
diff --git a/runtime/tests/integration.rs b/runtime/tests/integration.rs
new file mode 100644
index 0000000..80a8252
--- /dev/null
+++ b/runtime/tests/integration.rs
@@ -0,0 +1,126 @@
+use runtime::{runtime, simulate_os, start_executor, start_reactor};
+
+use freezable::FreezableGenerator4;
+use freezable::{Freezable, FreezableError, FreezableState};
+use freezable_macro::freezable;
+use rand::Rng;
+use std::collections::HashSet;
+use std::sync::mpsc;
+use std::{thread, time};
+
+#[test]
+fn os_simulation() {
+ let mut rng = rand::thread_rng();
+
+ let (subscription_sender, subscription_recv) = mpsc::channel();
+ let (notification_sender, notification_recv) = mpsc::channel();
+ thread::spawn(|| simulate_os(notification_sender, subscription_recv));
+
+ // 5 resources to be subscribed
+ let mut resource_set = HashSet::new();
+ for _ in 0..5 {
+ resource_set.insert(rng.gen::<u8>());
+ }
+
+ // requesting 5 random resources with 100 millisecond interval
+ for &resource in &resource_set {
+ subscription_sender.send(resource).unwrap();
+ }
+
+ for _ in 0..resource_set.len() {
+ let resource = notification_recv.recv().unwrap();
+ assert!(resource_set.remove(&resource));
+ }
+
+ assert!(resource_set.is_empty())
+}
+
+#[test]
+fn reactor_and_os_simulation() {
+ let mut rng = rand::thread_rng();
+
+ let (subscription_sender, subscription_recv) = mpsc::channel();
+ let (notification_sender, notification_recv) = mpsc::channel();
+ let (event_sender, event_recv) = mpsc::channel();
+ let (awake_signal_sender, awake_signal_recv) = mpsc::channel();
+
+ thread::spawn(|| simulate_os(notification_sender, subscription_recv));
+ thread::spawn(|| {
+ start_reactor(
+ event_recv,
+ awake_signal_sender,
+ subscription_sender,
+ notification_recv,
+ )
+ });
+
+ // there are 5 events that we are interested in (we are awaiting on these 5 I/O resources)
+ let mut event_set = HashSet::new();
+ for _ in 0..5 {
+ event_set.insert(rng.gen::<u8>());
+ }
+
+ // telling to the `Reactor` that we are interested in 5 events with 100 millisecond interval
+ for &event in &event_set {
+ event_sender.send(event).unwrap();
+ thread::sleep(time::Duration::from_millis(100)); // ZA WARUDO
+ }
+
+ for _ in 0..event_set.len() {
+ let event = awake_signal_recv.recv().unwrap();
+ assert!(event_set.remove(&event));
+ }
+
+ assert!(event_set.is_empty())
+}
+
+#[test]
+fn executor_reactor_and_os_simulation() {
+ let async_task1 = FreezableGenerator4::start(3);
+ let async_task2 = FreezableGenerator4::start(7);
+ let async_task3 = FreezableGenerator4::start(10);
+ let mut tasks = vec![async_task1, async_task2, async_task3];
+
+ let (subscription_sender, subscription_recv) = mpsc::channel();
+ let (notification_sender, notification_recv) = mpsc::channel();
+ let (event_sender, event_recv) = mpsc::channel();
+ let (awake_signal_sender, awake_signal_recv) = mpsc::channel();
+
+ thread::spawn(|| simulate_os(notification_sender, subscription_recv));
+ thread::spawn(|| {
+ start_reactor(
+ event_recv,
+ awake_signal_sender,
+ subscription_sender,
+ notification_recv,
+ )
+ });
+
+ start_executor(&mut tasks, event_sender, awake_signal_recv);
+
+ assert!(tasks.iter().all(|task| task.is_finished()));
+}
+
+#[test]
+fn runtime_with_macro() {
+ #[freezable]
+ fn freezable_generator_4(begin: u8) -> u8 {
+ let mut next: u8 = begin;
+ freeze!(next); // freezes the function, but also return the partial result
+ next += 1;
+ freeze!(next);
+ next += 1;
+ freeze!(next);
+ next += 1;
+ next
+ }
+
+ let async_task1 = freezable_generator_4::start(3);
+ let async_task2 = freezable_generator_4::start(7);
+ let async_task3 = freezable_generator_4::start(12);
+ let mut tasks = vec![async_task1, async_task2, async_task3];
+
+ runtime(&mut tasks);
+
+ assert!(tasks.iter().all(|task| task.is_finished()));
+}