added code for chapter 2

Carl Fredrik Samson
2020-01-25 20:20:43 +01:00
parent 7ad6a3986e
commit 7cf660c1f1
28 changed files with 2079 additions and 147 deletions


@@ -55,21 +55,29 @@ As you see from the output after running this, the sizes of the references varie
Most are 8 bytes (which is a pointer size on 64 bit systems), but some are 16
bytes.
The 16 byte sized pointers are called "fat pointers" since they carry extra
information.

**In the case of `&[i32]`:**

- The first 8 bytes is the actual pointer to the first element in the array
  (or part of an array the slice refers to)
- The second 8 bytes is the length of the slice.

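A minimal sketch of this could look as follows; it prints the size of a thin
reference and a slice reference, and reads the two halves of the fat pointer
back out:

```rust
use std::mem::size_of;

fn main() {
    let data = [10, 20, 30, 40];
    let slice: &[i32] = &data[1..3];

    // A plain reference is a single (thin) pointer: 8 bytes on a 64-bit system.
    println!("&i32:   {} bytes", size_of::<&i32>());
    // A slice reference is a fat pointer: data pointer + length = 16 bytes.
    println!("&[i32]: {} bytes", size_of::<&[i32]>());

    // The two halves: where the slice starts, and how many elements it covers.
    println!("data pointer: {:?}, length: {}", slice.as_ptr(), slice.len());
}
```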
The ones we'll concern ourselves with here are references to traits, or
_trait objects_ as they're called in Rust. `&dyn SomeTrait` is a reference to
such a _trait object_, and as you see it has a size of 16 bytes.

The layout for a pointer to a _trait object_ looks like this:
- The first 8 bytes points to the `data` for the trait object
- The second 8 bytes points to the `vtable` for the trait object
When you implement a trait, its methods take `self` as the first parameter. The
`data` pointer points to this `self` object, and it's what gets passed in to
the functions in the `vtable`.

This layout allows us to refer to an object we know nothing about except that
it implements the methods defined by our trait. Calling a method through such
a reference is called dynamic dispatch: the call is resolved at runtime by
looking up the function pointer in the `vtable`.

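As a quick illustration before we build one by hand (the `Greeter` type below
is just a placeholder), this sketch calls a method through a `&dyn SomeTrait`
reference and confirms that the reference itself is 16 bytes:

```rust
use std::mem::size_of;

trait SomeTrait {
    fn hello(&self);
}

struct Greeter {
    name: &'static str,
}

impl SomeTrait for Greeter {
    // `&self` is what the `data` pointer of the trait object will point to.
    fn hello(&self) {
        println!("Hello from {}", self.name);
    }
}

fn main() {
    let greeter = Greeter { name: "a trait object" };

    // One pointer to `greeter` (the data), one pointer to the vtable
    // for the `SomeTrait` implementation of `Greeter`.
    let obj: &dyn SomeTrait = &greeter;
    println!("size of &dyn SomeTrait: {} bytes", size_of::<&dyn SomeTrait>());

    // The call is resolved at runtime through the vtable (dynamic dispatch).
    obj.hello();
}
```
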
Let's explain this in code instead of words by implementing our own trait
object from these parts:
@@ -137,11 +145,13 @@ fn main() {
```
If you run this code by pressing the "play" button at the top you'll see it
outputs just what we expect.

This code example is editable so you can change it
and run it to see what happens.
The reason we go through this will be clear later on when we implement our own
`Waker`. We'll actually set up a `vtable` like we do here to define methods
like `wake`, and knowing what a vtable is will make this much less mysterious.

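As a small preview, here is a sketch of a do-nothing `Waker` assembled from
exactly such a pair using the standard library's `RawWaker`, `RawWakerVTable`
and `Waker` types (the `noop` functions are just placeholders):

```rust
use std::task::{RawWaker, RawWakerVTable, Waker};

// Vtable functions for a Waker that does nothing. `wake` is one of the
// "methods" the vtable provides, just like in the trait object example.
unsafe fn noop_clone(data: *const ()) -> RawWaker {
    RawWaker::new(data, &NOOP_VTABLE)
}
unsafe fn noop(_data: *const ()) {}

static NOOP_VTABLE: RawWakerVTable = RawWakerVTable::new(noop_clone, noop, noop, noop);

fn main() {
    // The data pointer is unused here, so a null pointer is fine.
    let raw = RawWaker::new(std::ptr::null(), &NOOP_VTABLE);
    // Safety: none of our vtable functions dereference the data pointer.
    let waker = unsafe { Waker::from_raw(raw) };
    waker.wake();
}
```
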
With that out of the way, let's move on to our main example.


@@ -0,0 +1,266 @@
# Naive example
```rust
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::mpsc::{channel, Sender};
use std::sync::{Arc, Mutex};
use std::thread::{self, JoinHandle};
use std::time::{Duration, Instant};
fn main() {
    let readylist = Arc::new(Mutex::new(vec![]));
    let mut reactor = Reactor::new();
    let mywaker = MyWaker::new(1, thread::current(), readylist.clone());
    reactor.register(2, mywaker);
    let mywaker = MyWaker::new(2, thread::current(), readylist.clone());
    reactor.register(2, mywaker);
    executor_run(reactor, readylist);
}

// ====== EXECUTOR ======
fn executor_run(mut reactor: Reactor, rl: Arc<Mutex<Vec<usize>>>) {
    let start = Instant::now();
    loop {
        let mut rl_locked = rl.lock().unwrap();
        while let Some(event) = rl_locked.pop() {
            let dur = (Instant::now() - start).as_secs_f32();
            println!("Event {} just happened at time: {:.2}.", event, dur);
            reactor.outstanding.fetch_sub(1, Ordering::Relaxed);
        }
        drop(rl_locked);

        if reactor.outstanding.load(Ordering::Relaxed) == 0 {
            reactor.close();
            break;
        }

        thread::park();
    }
}

// ====== "FUTURE" IMPL ======
#[derive(Debug)]
struct MyWaker {
    id: usize,
    thread: thread::Thread,
    readylist: Arc<Mutex<Vec<usize>>>,
}

impl MyWaker {
    fn new(id: usize, thread: thread::Thread, readylist: Arc<Mutex<Vec<usize>>>) -> Self {
        MyWaker {
            id,
            thread,
            readylist,
        }
    }

    fn wake(&self) {
        self.readylist.lock().map(|mut rl| rl.push(self.id)).unwrap();
        self.thread.unpark();
    }
}

#[derive(Debug, Clone)]
pub struct Task {
    id: usize,
    pending: bool,
}

// ===== REACTOR =====
struct Reactor {
    dispatcher: Sender<Event>,
    handle: Option<JoinHandle<()>>,
    outstanding: AtomicUsize,
}

#[derive(Debug)]
enum Event {
    Close,
    Simple(MyWaker, u64),
}

impl Reactor {
    fn new() -> Self {
        let (tx, rx) = channel::<Event>();
        let mut handles = vec![];
        let handle = thread::spawn(move || {
            // This simulates some I/O resource
            for event in rx {
                match event {
                    Event::Close => break,
                    Event::Simple(mywaker, duration) => {
                        let event_handle = thread::spawn(move || {
                            thread::sleep(Duration::from_secs(duration));
                            mywaker.wake();
                        });
                        handles.push(event_handle);
                    }
                }
            }

            for handle in handles {
                handle.join().unwrap();
            }
        });

        Reactor {
            dispatcher: tx,
            handle: Some(handle),
            outstanding: AtomicUsize::new(0),
        }
    }

    fn register(&mut self, duration: u64, mywaker: MyWaker) {
        self.dispatcher
            .send(Event::Simple(mywaker, duration))
            .unwrap();
        self.outstanding.fetch_add(1, Ordering::Relaxed);
    }

    fn close(&mut self) {
        self.dispatcher.send(Event::Close).unwrap();
    }
}

impl Drop for Reactor {
    fn drop(&mut self) {
        self.handle.take().map(|h| h.join().unwrap()).unwrap();
    }
}
```
```rust, editable
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::mpsc::{channel, Sender};
use std::sync::{Arc, Mutex};
use std::thread::{self, JoinHandle};
use std::time::{Duration, Instant};
fn main() {
    let readylist = Arc::new(Mutex::new(vec![]));
    let mut reactor = Reactor::new();
    let mywaker = MyWaker::new(1, thread::current(), readylist.clone());
    reactor.register(2, mywaker);
    let mywaker = MyWaker::new(2, thread::current(), readylist.clone());
    reactor.register(2, mywaker);
    executor_run(reactor, readylist);
}
# // ====== EXECUTOR ======
# fn executor_run(mut reactor: Reactor, rl: Arc<Mutex<Vec<usize>>>) {
#     let start = Instant::now();
#     loop {
#         let mut rl_locked = rl.lock().unwrap();
#         while let Some(event) = rl_locked.pop() {
#             let dur = (Instant::now() - start).as_secs_f32();
#             println!("Event {} just happened at time: {:.2}.", event, dur);
#             reactor.outstanding.fetch_sub(1, Ordering::Relaxed);
#         }
#         drop(rl_locked);
#
#         if reactor.outstanding.load(Ordering::Relaxed) == 0 {
#             reactor.close();
#             break;
#         }
#
#         thread::park();
#     }
# }
#
# // ====== "FUTURE" IMPL ======
# #[derive(Debug)]
# struct MyWaker {
#     id: usize,
#     thread: thread::Thread,
#     readylist: Arc<Mutex<Vec<usize>>>,
# }
#
# impl MyWaker {
#     fn new(id: usize, thread: thread::Thread, readylist: Arc<Mutex<Vec<usize>>>) -> Self {
#         MyWaker {
#             id,
#             thread,
#             readylist,
#         }
#     }
#
#     fn wake(&self) {
#         self.readylist.lock().map(|mut rl| rl.push(self.id)).unwrap();
#         self.thread.unpark();
#     }
# }
#
#
# #[derive(Debug, Clone)]
# pub struct Task {
#     id: usize,
#     pending: bool,
# }
#
# // ===== REACTOR =====
# struct Reactor {
#     dispatcher: Sender<Event>,
#     handle: Option<JoinHandle<()>>,
#     outstanding: AtomicUsize,
# }
# #[derive(Debug)]
# enum Event {
#     Close,
#     Simple(MyWaker, u64),
# }
#
# impl Reactor {
#     fn new() -> Self {
#         let (tx, rx) = channel::<Event>();
#         let mut handles = vec![];
#         let handle = thread::spawn(move || {
#             // This simulates some I/O resource
#             for event in rx {
#                 match event {
#                     Event::Close => break,
#                     Event::Simple(mywaker, duration) => {
#                         let event_handle = thread::spawn(move || {
#                             thread::sleep(Duration::from_secs(duration));
#                             mywaker.wake();
#                         });
#                         handles.push(event_handle);
#                     }
#                 }
#             }
#
#             for handle in handles {
#                 handle.join().unwrap();
#             }
#         });
#
#         Reactor {
#             dispatcher: tx,
#             handle: Some(handle),
#             outstanding: AtomicUsize::new(0),
#         }
#     }
#
#     fn register(&mut self, duration: u64, mywaker: MyWaker) {
#         self.dispatcher
#             .send(Event::Simple(mywaker, duration))
#             .unwrap();
#         self.outstanding.fetch_add(1, Ordering::Relaxed);
#     }
#
#     fn close(&mut self) {
#         self.dispatcher.send(Event::Close).unwrap();
#     }
# }
#
# impl Drop for Reactor {
#     fn drop(&mut self) {
#         self.handle.take().map(|h| h.join().unwrap()).unwrap();
#     }
# }
```

src/0_3_proper_waker.md Normal file

@@ -0,0 +1 @@
# Proper Waker

src/0_4_proper_future.md Normal file

@@ -0,0 +1 @@
# Proper Future

src/0_5_async_wait.md Normal file

@@ -0,0 +1 @@
# Supporting async/await


@@ -0,0 +1 @@
# Bonus: concurrent futures


@@ -2,3 +2,8 @@
- [Introduction](./0_0_introduction.md)
- [Some background information](./0_1_background_information.md)
- [Naive example](./0_2_naive_implementation.md)
- [Proper Waker](./0_3_proper_waker.md)
- [Proper Future](./0_4_proper_future.md)
- [Supporting async/await](./0_5_async_wait.md)
- [Bonus: concurrent futures](./0_6_concurrent_futures.md)