59 lines
1.4 KiB
Rust
59 lines
1.4 KiB
Rust
use crate::game::{Action, Game};
|
|
use crate::playfield::{Matrix, PlayField};
|
|
use crate::tetromino::{Tetromino, TetrominoType};
|
|
use rand::rngs::SmallRng;
|
|
|
|
/// Q-learning based implementation of the `Actor` trait defined below.
pub mod qlearning;
|
|
|
|
/// A hashable snapshot of the observable game state.
///
/// Built from a [`Game`] or [`PlayField`] via the `From` conversions in this
/// module; `Hash + Eq` allow it to serve as a map key for learned values.
#[derive(Hash, PartialEq, Eq, Clone, Debug)]
pub struct State {
    // The playfield grid contents.
    matrix: Matrix,
    // The piece currently under player/agent control, if any.
    active_piece: Option<Tetromino>,
    // The piece type sitting in the hold slot, if any.
    held_piece: Option<TetrominoType>,
    // Running count of cleared lines (0 when built from a bare PlayField).
    line_clears: u32,
}
|
|
|
|
impl From<Game> for State {
|
|
fn from(game: Game) -> Self {
|
|
(&game).into()
|
|
}
|
|
}
|
|
|
|
impl From<&Game> for State {
    /// Builds a [`State`] from the game's playfield, carrying over the
    /// game's running line-clear count.
    fn from(game: &Game) -> Self {
        Self {
            line_clears: game.line_clears,
            // Everything else comes from the playfield snapshot.
            ..game.playfield().clone().into()
        }
    }
}
|
|
|
|
impl From<PlayField> for State {
    /// Builds a [`State`] snapshot from a playfield.
    ///
    /// `line_clears` starts at 0 because a bare [`PlayField`] carries no
    /// line-clear history; convert from a [`Game`] to preserve that count.
    fn from(playfield: PlayField) -> Self {
        Self {
            matrix: playfield.field().clone(),
            active_piece: playfield.active_piece,
            // `.cloned()` instead of `.map(|t| t.clone())` (clippy: map_clone).
            held_piece: playfield.hold_piece().cloned(),
            line_clears: 0,
        }
    }
}
|
|
|
|
/// An agent that selects actions for a game state and learns from
/// observed transitions.
pub trait Actor {
    /// Picks one of `legal_actions` to take in `state`.
    ///
    /// `rng` supplies randomness, e.g. for exploratory action choice
    /// (see [`Actor::set_exploration_prob`]).
    fn get_action(&self, rng: &mut SmallRng, state: &State, legal_actions: &[Action]) -> Action;

    /// Observes the transition `(state, action) -> next_state` with the
    /// given `reward`, updating the actor's internal estimates.
    ///
    /// `next_legal_actions` are the actions available from `next_state`.
    fn update(
        &mut self,
        state: State,
        action: Action,
        next_state: State,
        next_legal_actions: &[Action],
        reward: f64,
    );

    /// Sets the learning rate (step size) used by [`Actor::update`].
    fn set_learning_rate(&mut self, learning_rate: f64);
    /// Sets the probability of taking a random (exploratory) action.
    fn set_exploration_prob(&mut self, exploration_prob: f64);
    /// Sets the discount rate applied to future rewards.
    fn set_discount_rate(&mut self, discount_rate: f64);

    /// Dumps debugging information about the actor's internal state.
    fn dbg(&self);
}
|