ch16 done

Rowan Torbitzky-Lane 2025-04-01 22:07:12 -05:00
parent 62f797ae3b
commit 8d9f5104cc
12 changed files with 311 additions and 0 deletions

7
ch16/extensible-concurrency/Cargo.lock generated Normal file

@@ -0,0 +1,7 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3
[[package]]
name = "extensible-concurrency"
version = "0.1.0"

6
ch16/extensible-concurrency/Cargo.toml Normal file

@@ -0,0 +1,6 @@
[package]
name = "extensible-concurrency"
version = "0.1.0"
edition = "2021"
[dependencies]

20
ch16/extensible-concurrency/src/main.rs Normal file

@@ -0,0 +1,20 @@
//! # Extensible Concurrency with the `Sync` and `Send` Traits
//!
//! Two concurrency concepts are embedded in the language itself:
//! the `std::marker` traits `Sync` and `Send`.
fn main() {
    // Allowing Transference of Ownership Between Threads with `Send`
    //
    // The `Send` marker trait indicates that ownership of values of the
    // type implementing `Send` can be transferred between threads.
    // Almost every Rust type is `Send`; a notable exception is `Rc<T>`.
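    //
    // A minimal illustration (my own sketch, not a listing from the book;
    // kept commented out because it intentionally fails to compile):
    // `Rc<i32>` is not `Send`, and `thread::spawn` requires a `Send`
    // closure, so the compiler rejects this with an error along the
    // lines of "`Rc<i32>` cannot be sent between threads safely".
    // let rc = std::rc::Rc::new(5);
    // std::thread::spawn(move || println!("{rc}"));
    //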
// Allowing Access from Multiple Threads with `Sync`
//
    // The `Sync` marker trait indicates that it is safe for the type
    // implementing `Sync` to be referenced from multiple threads.
    // In other words, `T` is `Sync` if `&T` is `Send`.
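    //
    // A tiny compile-time check (the helper functions below are my own,
    // not from the book): each call compiles only if the type satisfies
    // the bound, so the compiler itself verifies the claims above.
    fn assert_send<T: Send>() {}
    fn assert_sync<T: Sync>() {}
    assert_send::<Vec<i32>>(); // `Vec<i32>` is `Send`
    assert_sync::<std::sync::Mutex<i32>>(); // `Mutex<i32>` is `Sync`
    // assert_send::<std::rc::Rc<i32>>(); // would not compile: `Rc<T>` is not `Send`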
//
    // Implementing Send and Sync Manually Is Unsafe
    //
    // Because these are marker traits with no methods to implement,
    // implementing them manually amounts to asserting their safety
    // guarantees by hand, which requires unsafe code; the book defers
    // the details to Chapter 20.
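    //
    // A syntax-only sketch (toy type of my own, kept as a comment): opting
    // a type into these traits by hand requires an `unsafe impl`, because
    // the compiler can no longer verify the guarantees for you.
    // struct MyRawBox(*mut u8);
    // unsafe impl Send for MyRawBox {}
    // unsafe impl Sync for MyRawBox {}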
}

7
ch16/message-passing/Cargo.lock generated Normal file

@@ -0,0 +1,7 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3
[[package]]
name = "message-passing"
version = "0.1.0"

6
ch16/message-passing/Cargo.toml Normal file

@@ -0,0 +1,6 @@
[package]
name = "message-passing"
version = "0.1.0"
edition = "2021"
[dependencies]

100
ch16/message-passing/src/main.rs Normal file

@@ -0,0 +1,100 @@
//! One way to ensure safe concurrency is message passing, in which
//! threads send data to one another. To accomplish message-sending
//! concurrency, Rust's standard library provides an implementation
//! of channels. A channel is a general way for data to be sent from
//! one thread to another.
//!
//! A channel has two halves: a transmitter and a receiver.
//! A channel is *closed* if either the transmitter or the
//! receiver is dropped.
use std::sync::mpsc;
use std::thread;
use std::time::Duration;
fn main() {
    // `mpsc` stands for multiple producer, single consumer:
    // a channel can have multiple sending ends but only one receiving end.
    // By convention, `tx` is the transmitter and `rx` is the receiver.
let (tx, rx) = mpsc::channel();
// spawned thread needs to own transmitter to send messages.
thread::spawn(move || {
let val = String::from("hi");
tx.send(val).unwrap();
        // `val` can't be used after the `send` call: `send` takes ownership
        // and moves the value, since the receiving thread could modify or
        // drop it before we use it again.
// println!("val is {val}");
});
    // The receiver has two useful methods: `recv` and `try_recv`.
    // `recv` blocks the current thread until a value arrives (or the
    // channel closes); `try_recv` returns immediately, which is useful
    // if this thread has other work to do while waiting for messages
    // (a commented `try_recv` sketch follows the `recv` call below).
let received = rx.recv().unwrap();
println!("Got: {received}");
    // Sending multiple values and seeing the receiver waiting
println!("Sending multiple values and seeing the receiver waiting section");
let (tx, rx) = mpsc::channel();
thread::spawn(move || {
let vals = vec![
String::from("hi"),
String::from("from"),
String::from("the"),
String::from("thread"),
];
for val in vals {
tx.send(val).unwrap();
thread::sleep(Duration::from_secs(1));
}
});
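    // Treating `rx` as an iterator: the loop prints each value as it
    // arrives and ends when the channel is closed (transmitter dropped).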
for received in rx {
println!("Got: {received}");
}
// Creating multiple producers by cloning the transmitter
println!("Creating multiple producers by cloning the transmitter");
let (tx, rx) = mpsc::channel();
let tx1 = tx.clone();
thread::spawn(move || {
let vals = vec![
String::from("hi"),
String::from("from"),
String::from("the"),
String::from("thread"),
];
for val in vals {
tx1.send(val).unwrap();
thread::sleep(Duration::from_secs(1));
}
});
thread::spawn(move || {
let vals = vec![
String::from("more"),
String::from("messages"),
String::from("for"),
String::from("you"),
];
for val in vals {
tx.send(val).unwrap();
thread::sleep(Duration::from_secs(1));
}
});
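    // Both producers send concurrently, so the order in which the
    // messages arrive is nondeterministic and may vary from run to run.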
for received in rx {
println!("Got: {received}");
}
}

7
ch16/shared-state/Cargo.lock generated Normal file

@@ -0,0 +1,7 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3
[[package]]
name = "shared-state"
version = "0.1.0"

6
ch16/shared-state/Cargo.toml Normal file

@@ -0,0 +1,6 @@
[package]
name = "shared-state"
version = "0.1.0"
edition = "2021"
[dependencies]

101
ch16/shared-state/src/main.rs Normal file

@@ -0,0 +1,101 @@
//! # Shared-State Concurrency
//!
//! Another way to handle concurrency is for multiple threads to share
//! the same data. Compare the Go slogan: "do not communicate by sharing
//! memory; instead, share memory by communicating."
// `Rc` is only used by the commented-out example below.
#[allow(unused_imports)]
use std::rc::Rc;
use std::sync::{Arc, Mutex};
use std::thread;
fn main() {
// Using mutexes to allow access to data from one thread at a time
//
// Mutex is an abbreviation for mutual exclusion. Allows only one
// thread to access some data at any given time.
    // To access the data in a mutex, a thread must first signal that it
    // wants access by asking to acquire the mutex's lock. The lock is a
    // data structure, part of the mutex, that keeps track of who currently
    // has exclusive access to the data; the mutex is therefore described
    // as guarding the data it holds.
    //
    // Mutexes have two rules:
    // 1) You must attempt to acquire the lock before using the data.
    // 2) When you're done with the data the mutex guards, you must unlock
    //    the data so other threads can acquire the lock.
let m = Mutex::new(5);
{
let mut num = m.lock().unwrap();
*num = 6;
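        // `lock` returns a `MutexGuard` smart pointer; when `num` goes out
        // of scope at the end of this block, the guard is dropped and the
        // lock is released automatically.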
}
println!("m = {m:?}");
// Sharing a Mutex<T> Between multiple threads
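    //
    // (Kept commented out: `counter` would be moved into the closure on
    // the first loop iteration, so later iterations could no longer use
    // it and the compiler rejects the program.)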
// let counter = Mutex::new(0);
// let mut handles = vec![];
// for _ in 0..10 {
// let handle = thread::spawn(move || {
// let mut num = counter.lock().unwrap();
// *num += 1;
// });
// handles.push(handle);
// }
// for handle in handles {
// handle.join().unwrap();
// }
// println!("Result: {}", *counter.lock().unwrap());
// Multiple ownership with multiple threads
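    //
    // (Also kept commented out: `Rc<T>` is not `Send`, so it cannot be
    // moved into spawned threads; the fix is `Arc<T>`, shown below.)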
// let counter = Rc::new(Mutex::new(0));
// let mut handles = vec![];
// for _ in 0..10 {
// let counter = Rc::clone(&counter);
// let handle = thread::spawn(move || {
// let mut num = counter.lock().unwrap();
// *num += 1;
// });
// handles.push(handle);
// }
// for handle in handles {
// handle.join().unwrap();
// }
// println!("Result: {}", *counter.lock().unwrap());
// Atomic reference counting with Arc<T>
//
    // `Arc<T>` is like `Rc<T>` but safe to use in concurrent situations.
    // The "a" stands for atomic, meaning it's an atomically reference-
    // counted type. Thread safety like this comes with a performance
    // cost, so it isn't the default. `Arc<T>` and `Rc<T>` have the same API.
let counter = Arc::new(Mutex::new(0));
let mut handles = vec![];
for _ in 0..10 {
let counter = Arc::clone(&counter);
let handle = thread::spawn(move || {
let mut num = counter.lock().unwrap();
*num += 1;
});
handles.push(handle);
}
for handle in handles {
handle.join().unwrap();
}
println!("Result: {}", *counter.lock().unwrap());
}

7
ch16/threads/Cargo.lock generated Normal file

@@ -0,0 +1,7 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3
[[package]]
name = "threads"
version = "0.1.0"

6
ch16/threads/Cargo.toml Normal file

@@ -0,0 +1,6 @@
[package]
name = "threads"
version = "0.1.0"
edition = "2021"
[dependencies]

38
ch16/threads/src/main.rs Normal file

@@ -0,0 +1,38 @@
use std::thread;
use std::time::Duration;
fn main() {
let handle = thread::spawn(|| {
for i in 1..10 {
println!("hi number {i} from the spawned thread!");
thread::sleep(Duration::from_millis(1));
}
});
for i in 1..5 {
println!("hi number {i} from the main thread!");
thread::sleep(Duration::from_millis(1));
}
    // Waiting for all threads to finish using join handles
    //
    // Without the `join` below, the spawned thread may be stopped
    // prematurely when `main` ends: there is no guarantee about the
    // order in which threads run, and no guarantee the spawned thread
    // gets to run to completion at all.
    // Calling `join` on a handle blocks the currently running thread
    // until the thread represented by `handle` terminates.
handle.join().unwrap();
// Using move closures with threads
//
    // The `move` keyword is often used with thread closures so the
    // closure takes ownership of the values it uses from the environment,
    // rather than borrowing them.
let v = vec![1, 2, 3];
let handle = thread::spawn(move || {
println!("Here's a vector: {v:?}");
});
handle.join().unwrap();
}