From ce65afa27791188edce8140f2f8ea9044983eabb Mon Sep 17 00:00:00 2001
From: Edward Shen
Date: Sun, 5 Apr 2020 13:31:05 -0400
Subject: [PATCH] use dynamic dispatch for actor selection

---
 src/cli.rs  | 10 +++++++---
 src/main.rs |  6 +++---
 2 files changed, 10 insertions(+), 6 deletions(-)

diff --git a/src/cli.rs b/src/cli.rs
index f1bd2e8..59c6abd 100644
--- a/src/cli.rs
+++ b/src/cli.rs
@@ -70,7 +70,8 @@ pub struct Train {
 arg_enum! {
     #[derive(Debug)]
     pub enum Agent {
-        QLearning
+        QLearning,
+        ApproximateQLearning,
     }
 }
 
@@ -87,6 +88,9 @@ pub fn init_verbosity(opts: &Opts) -> Result<(), Box<dyn Error>> {
     Ok(())
 }
 
-pub fn get_actor() -> impl Actor {
-    qlearning::QLearningAgent::default()
+pub fn get_actor(agent: Agent) -> Box<dyn Actor> {
+    match agent {
+        Agent::QLearning => Box::new(qlearning::QLearningAgent::default()),
+        Agent::ApproximateQLearning => todo!(),
+    }
 }
diff --git a/src/main.rs b/src/main.rs
index 00339c3..5bcf9bd 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -33,7 +33,7 @@ async fn main() -> Result<(), Box<dyn Error>> {
     match opts.subcmd {
         SubCommand::Play(sub_opts) => {}
         SubCommand::Train(sub_opts) => {
-            let mut to_train = get_actor();
+            let mut to_train = get_actor(sub_opts.agent);
             to_train.set_learning_rate(sub_opts.learning_rate);
             to_train.set_discount_rate(sub_opts.discount_rate);
             to_train.set_exploration_prob(sub_opts.exploration_prob);
@@ -61,7 +61,7 @@ async fn main() -> Result<(), Box<dyn Error>> {
     Ok(())
 }
 
-fn train_actor(episodes: usize, mut actor: impl Actor) -> impl Actor {
+fn train_actor(episodes: usize, mut actor: Box<dyn Actor>) -> Box<dyn Actor> {
     let mut rng = rand::rngs::SmallRng::from_entropy();
     let mut avg = 0.0;
 
@@ -109,7 +109,7 @@ fn train_actor(episodes: usize, mut actor: impl Actor) -> impl Actor {
     actor
 }
 
-async fn play_game(mut actor: Option<impl Actor>) -> Result<(), Box<dyn Error>> {
+async fn play_game(mut actor: Option<Box<dyn Actor>>) -> Result<(), Box<dyn Error>> {
     let mut rng = rand::rngs::SmallRng::from_entropy();
     let sdl_context = sdl2::init()?;
     let video_subsystem = sdl_context.video()?;
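
Note: the patch replaces `impl Actor` returns with boxed trait objects so that a single factory can hand back a different agent type chosen at runtime. Below is a minimal, self-contained sketch of that pattern. The `Actor` trait, its `set_learning_rate` method, and the two agent structs are stand-ins for illustration, not the repo's actual definitions (the real trait and `qlearning` module are not shown in this patch), and `ApproximateQLearning` is given a hypothetical agent here where the patch itself uses `todo!()`.

// Stand-in trait: the repo's real Actor trait has more methods
// (set_discount_rate, set_exploration_prob, ...) that are elided here.
trait Actor {
    fn set_learning_rate(&mut self, rate: f64);
}

// Hypothetical agents mirroring the Agent enum variants in src/cli.rs.
#[derive(Default)]
struct QLearningAgent {
    learning_rate: f64,
}

impl Actor for QLearningAgent {
    fn set_learning_rate(&mut self, rate: f64) {
        self.learning_rate = rate;
    }
}

#[derive(Default)]
struct ApproximateQLearningAgent {
    learning_rate: f64,
}

impl Actor for ApproximateQLearningAgent {
    fn set_learning_rate(&mut self, rate: f64) {
        self.learning_rate = rate;
    }
}

enum Agent {
    QLearning,
    ApproximateQLearning,
}

// A `-> impl Actor` return type must resolve to exactly one concrete
// type at compile time, so a factory that picks an agent at runtime
// has to return a boxed trait object instead.
fn get_actor(agent: Agent) -> Box<dyn Actor> {
    match agent {
        Agent::QLearning => Box::new(QLearningAgent::default()),
        Agent::ApproximateQLearning => Box::new(ApproximateQLearningAgent::default()),
    }
}

fn main() {
    for agent in [Agent::QLearning, Agent::ApproximateQLearning] {
        let mut actor = get_actor(agent);
        actor.set_learning_rate(0.1);
    }
}

The trade-off: `impl Actor` is static dispatch, monomorphized to one concrete type per function, while `Box<dyn Actor>` dispatches through a vtable at runtime. That indirection is what lets the `match` arms return different agents, and it is why `train_actor` and `play_game` in the patch switch to `Box<dyn Actor>` as well; it also requires the `Actor` trait to be object-safe.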