From 6e8099c57facccf147a05081ed2d9a1413226aa1 Mon Sep 17 00:00:00 2001
From: Edward Shen
Date: Tue, 21 Apr 2020 13:40:24 -0400
Subject: [PATCH] extract tar

---
 .../a0.1-g0.1-e0.1-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.1-g0.1-e0.1-qlearning | 30 ++++++++++++++++++
 .../a0.1-g0.1-e0.25-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.1-g0.1-e0.25-qlearning | 30 ++++++++++++++++++
 .../a0.1-g0.1-e0.5-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.1-g0.1-e0.5-qlearning | 30 ++++++++++++++++++
 .../a0.1-g0.1-e0.75-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.1-g0.1-e0.75-qlearning | 30 ++++++++++++++++++
 .../a0.1-g0.1-e0.9-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.1-g0.1-e0.9-qlearning | 30 ++++++++++++++++++
 .../a0.1-g0.5-e0.1-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.1-g0.5-e0.1-qlearning | 30 ++++++++++++++++++
 .../a0.1-g0.5-e0.25-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.1-g0.5-e0.25-qlearning | 30 ++++++++++++++++++
 .../a0.1-g0.5-e0.5-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.1-g0.5-e0.5-qlearning | 30 ++++++++++++++++++
 .../a0.1-g0.5-e0.75-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.1-g0.5-e0.75-qlearning | 30 ++++++++++++++++++
 .../a0.1-g0.5-e0.9-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.1-g0.5-e0.9-qlearning | 30 ++++++++++++++++++
 .../a0.1-g0.9-e0.1-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.1-g0.9-e0.1-qlearning | 30 ++++++++++++++++++
 .../a0.1-g0.9-e0.25-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.1-g0.9-e0.25-qlearning | 30 ++++++++++++++++++
 .../a0.1-g0.9-e0.5-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.1-g0.9-e0.5-qlearning | 30 ++++++++++++++++++
 .../a0.1-g0.9-e0.75-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.1-g0.9-e0.75-qlearning | 30 ++++++++++++++++++
 .../a0.1-g0.9-e0.9-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.1-g0.9-e0.9-qlearning | 30 ++++++++++++++++++
 .../a0.1-g1.0-e0.1-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.1-g1.0-e0.1-qlearning | 30 ++++++++++++++++++
 .../a0.1-g1.0-e0.25-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.1-g1.0-e0.25-qlearning | 30 ++++++++++++++++++
 .../a0.1-g1.0-e0.5-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.1-g1.0-e0.5-qlearning | 30 ++++++++++++++++++
 .../a0.1-g1.0-e0.75-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.1-g1.0-e0.75-qlearning | 30 ++++++++++++++++++
 .../a0.1-g1.0-e0.9-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.1-g1.0-e0.9-qlearning | 30 ++++++++++++++++++
 .../a0.5-g0.1-e0.1-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.5-g0.1-e0.1-qlearning | 30 ++++++++++++++++++
 .../a0.5-g0.1-e0.25-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.5-g0.1-e0.25-qlearning | 30 ++++++++++++++++++
 .../a0.5-g0.1-e0.5-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.5-g0.1-e0.5-qlearning | 30 ++++++++++++++++++
 .../a0.5-g0.1-e0.75-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.5-g0.1-e0.75-qlearning | 30 ++++++++++++++++++
 .../a0.5-g0.1-e0.9-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.5-g0.1-e0.9-qlearning | 30 ++++++++++++++++++
 .../a0.5-g0.5-e0.1-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.5-g0.5-e0.1-qlearning | 30 ++++++++++++++++++
 .../a0.5-g0.5-e0.25-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.5-g0.5-e0.25-qlearning | 30 ++++++++++++++++++
 .../a0.5-g0.5-e0.5-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.5-g0.5-e0.5-qlearning | 30 ++++++++++++++++++
 .../a0.5-g0.5-e0.75-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.5-g0.5-e0.75-qlearning | 30 ++++++++++++++++++
 .../a0.5-g0.5-e0.9-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.5-g0.5-e0.9-qlearning | 30 ++++++++++++++++++
 .../a0.5-g0.9-e0.1-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.5-g0.9-e0.1-qlearning | 30 ++++++++++++++++++
 .../a0.5-g0.9-e0.25-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.5-g0.9-e0.25-qlearning | 30 ++++++++++++++++++
 .../a0.5-g0.9-e0.5-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.5-g0.9-e0.5-qlearning | 30 ++++++++++++++++++
 .../a0.5-g0.9-e0.75-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.5-g0.9-e0.75-qlearning | 30 ++++++++++++++++++
 .../a0.5-g0.9-e0.9-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.5-g0.9-e0.9-qlearning | 30 ++++++++++++++++++
 .../a0.5-g1.0-e0.1-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.5-g1.0-e0.1-qlearning | 30 ++++++++++++++++++
 .../a0.5-g1.0-e0.25-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.5-g1.0-e0.25-qlearning | 30 ++++++++++++++++++
 .../a0.5-g1.0-e0.5-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.5-g1.0-e0.5-qlearning | 30 ++++++++++++++++++
 .../a0.5-g1.0-e0.75-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.5-g1.0-e0.75-qlearning | 30 ++++++++++++++++++
 .../a0.5-g1.0-e0.9-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.5-g1.0-e0.9-qlearning | 30 ++++++++++++++++++
 .../a0.7-g0.1-e0.1-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.7-g0.1-e0.1-qlearning | 30 ++++++++++++++++++
 .../a0.7-g0.1-e0.25-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.7-g0.1-e0.25-qlearning | 30 ++++++++++++++++++
 .../a0.7-g0.1-e0.5-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.7-g0.1-e0.5-qlearning | 30 ++++++++++++++++++
 .../a0.7-g0.1-e0.75-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.7-g0.1-e0.75-qlearning | 30 ++++++++++++++++++
 .../a0.7-g0.1-e0.9-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.7-g0.1-e0.9-qlearning | 30 ++++++++++++++++++
 .../a0.7-g0.5-e0.1-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.7-g0.5-e0.1-qlearning | 30 ++++++++++++++++++
 .../a0.7-g0.5-e0.25-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.7-g0.5-e0.25-qlearning | 30 ++++++++++++++++++
 .../a0.7-g0.5-e0.5-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.7-g0.5-e0.5-qlearning | 30 ++++++++++++++++++
 .../a0.7-g0.5-e0.75-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.7-g0.5-e0.75-qlearning | 30 ++++++++++++++++++
 .../a0.7-g0.5-e0.9-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.7-g0.5-e0.9-qlearning | 30 ++++++++++++++++++
 .../a0.7-g0.9-e0.1-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.7-g0.9-e0.1-qlearning | 30 ++++++++++++++++++
 .../a0.7-g0.9-e0.25-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.7-g0.9-e0.25-qlearning | 30 ++++++++++++++++++
 .../a0.7-g0.9-e0.5-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.7-g0.9-e0.5-qlearning | 30 ++++++++++++++++++
 .../a0.7-g0.9-e0.75-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.7-g0.9-e0.75-qlearning | 30 ++++++++++++++++++
 .../a0.7-g0.9-e0.9-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.7-g0.9-e0.9-qlearning | 30 ++++++++++++++++++
 .../a0.7-g1.0-e0.1-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.7-g1.0-e0.1-qlearning | 30 ++++++++++++++++++
 .../a0.7-g1.0-e0.25-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.7-g1.0-e0.25-qlearning | 30 ++++++++++++++++++
 .../a0.7-g1.0-e0.5-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.7-g1.0-e0.5-qlearning | 30 ++++++++++++++++++
 .../a0.7-g1.0-e0.75-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.7-g1.0-e0.75-qlearning | 30 ++++++++++++++++++
 .../a0.7-g1.0-e0.9-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.7-g1.0-e0.9-qlearning | 30 ++++++++++++++++++
 .../a0.9-g0.1-e0.1-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.9-g0.1-e0.1-qlearning | 30 ++++++++++++++++++
 .../a0.9-g0.1-e0.25-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.9-g0.1-e0.25-qlearning | 30 ++++++++++++++++++
 .../a0.9-g0.1-e0.5-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.9-g0.1-e0.5-qlearning | 30 ++++++++++++++++++
 .../a0.9-g0.1-e0.75-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.9-g0.1-e0.75-qlearning | 30 ++++++++++++++++++
 .../a0.9-g0.1-e0.9-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.9-g0.1-e0.9-qlearning | 30 ++++++++++++++++++
 .../a0.9-g0.5-e0.1-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.9-g0.5-e0.1-qlearning | 30 ++++++++++++++++++
 .../a0.9-g0.5-e0.25-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.9-g0.5-e0.25-qlearning | 30 ++++++++++++++++++
 .../a0.9-g0.5-e0.5-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.9-g0.5-e0.5-qlearning | 30 ++++++++++++++++++
 .../a0.9-g0.5-e0.75-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.9-g0.5-e0.75-qlearning | 30 ++++++++++++++++++
 .../a0.9-g0.5-e0.9-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.9-g0.5-e0.9-qlearning | 30 ++++++++++++++++++
 .../a0.9-g0.9-e0.1-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.9-g0.9-e0.1-qlearning | 30 ++++++++++++++++++
 .../a0.9-g0.9-e0.25-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.9-g0.9-e0.25-qlearning | 30 ++++++++++++++++++
 .../a0.9-g0.9-e0.5-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.9-g0.9-e0.5-qlearning | 30 ++++++++++++++++++
 .../a0.9-g0.9-e0.75-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.9-g0.9-e0.75-qlearning | 30 ++++++++++++++++++
 .../a0.9-g0.9-e0.9-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.9-g0.9-e0.9-qlearning | 30 ++++++++++++++++++
 .../a0.9-g1.0-e0.1-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.9-g1.0-e0.1-qlearning | 30 ++++++++++++++++++
 .../a0.9-g1.0-e0.25-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.9-g1.0-e0.25-qlearning | 30 ++++++++++++++++++
 .../a0.9-g1.0-e0.5-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.9-g1.0-e0.5-qlearning | 30 ++++++++++++++++++
 .../a0.9-g1.0-e0.75-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.9-g1.0-e0.75-qlearning | 30 ++++++++++++++++++
 .../a0.9-g1.0-e0.9-approximateqlearning | 30 ++++++++++++++++++
 qlearning-results/a0.9-g1.0-e0.9-qlearning | 30 ++++++++++++++++++
 qlearning-results/test | 21 ++++++++++++
 .../tetris | Bin 5109760 -> 4777888 bytes
 162 files changed, 4821 insertions(+)
 create mode 100644 qlearning-results/a0.1-g0.1-e0.1-approximateqlearning
 create mode 100644 qlearning-results/a0.1-g0.1-e0.1-qlearning
 create mode 100644 qlearning-results/a0.1-g0.1-e0.25-approximateqlearning
 create mode 100644 qlearning-results/a0.1-g0.1-e0.25-qlearning
 create mode 100644 qlearning-results/a0.1-g0.1-e0.5-approximateqlearning
 create mode 100644 qlearning-results/a0.1-g0.1-e0.5-qlearning
 create mode 100644 qlearning-results/a0.1-g0.1-e0.75-approximateqlearning
 create mode 100644 qlearning-results/a0.1-g0.1-e0.75-qlearning
 create mode 100644 qlearning-results/a0.1-g0.1-e0.9-approximateqlearning
 create mode 100644 qlearning-results/a0.1-g0.1-e0.9-qlearning
 create mode 100644 qlearning-results/a0.1-g0.5-e0.1-approximateqlearning
 create mode 100644 qlearning-results/a0.1-g0.5-e0.1-qlearning
 create mode 100644 qlearning-results/a0.1-g0.5-e0.25-approximateqlearning
 create mode 100644 qlearning-results/a0.1-g0.5-e0.25-qlearning
 create mode 100644 qlearning-results/a0.1-g0.5-e0.5-approximateqlearning
 create mode 100644 qlearning-results/a0.1-g0.5-e0.5-qlearning
 create mode 100644 qlearning-results/a0.1-g0.5-e0.75-approximateqlearning
 create mode 100644 qlearning-results/a0.1-g0.5-e0.75-qlearning
 create mode 100644 qlearning-results/a0.1-g0.5-e0.9-approximateqlearning
 create mode 100644 qlearning-results/a0.1-g0.5-e0.9-qlearning
 create mode 100644 qlearning-results/a0.1-g0.9-e0.1-approximateqlearning
 create mode 100644 qlearning-results/a0.1-g0.9-e0.1-qlearning
 create mode 100644 qlearning-results/a0.1-g0.9-e0.25-approximateqlearning
 create mode 100644 qlearning-results/a0.1-g0.9-e0.25-qlearning
 create mode 100644 qlearning-results/a0.1-g0.9-e0.5-approximateqlearning
 create mode 100644 qlearning-results/a0.1-g0.9-e0.5-qlearning
 create mode 100644 qlearning-results/a0.1-g0.9-e0.75-approximateqlearning
 create mode 100644 qlearning-results/a0.1-g0.9-e0.75-qlearning
 create mode 100644 qlearning-results/a0.1-g0.9-e0.9-approximateqlearning
 create mode 100644 qlearning-results/a0.1-g0.9-e0.9-qlearning
 create mode 100644 qlearning-results/a0.1-g1.0-e0.1-approximateqlearning
 create mode 100644 qlearning-results/a0.1-g1.0-e0.1-qlearning
 create mode 100644 qlearning-results/a0.1-g1.0-e0.25-approximateqlearning
 create mode 100644 qlearning-results/a0.1-g1.0-e0.25-qlearning
 create mode 100644 qlearning-results/a0.1-g1.0-e0.5-approximateqlearning
 create mode 100644 qlearning-results/a0.1-g1.0-e0.5-qlearning
 create mode 100644 qlearning-results/a0.1-g1.0-e0.75-approximateqlearning
 create mode 100644 qlearning-results/a0.1-g1.0-e0.75-qlearning
 create mode 100644 qlearning-results/a0.1-g1.0-e0.9-approximateqlearning
 create mode 100644 qlearning-results/a0.1-g1.0-e0.9-qlearning
 create mode 100644 qlearning-results/a0.5-g0.1-e0.1-approximateqlearning
 create mode 100644 qlearning-results/a0.5-g0.1-e0.1-qlearning
 create mode 100644 qlearning-results/a0.5-g0.1-e0.25-approximateqlearning
 create mode 100644 qlearning-results/a0.5-g0.1-e0.25-qlearning
 create mode 100644 qlearning-results/a0.5-g0.1-e0.5-approximateqlearning
 create mode 100644 qlearning-results/a0.5-g0.1-e0.5-qlearning
 create mode 100644 qlearning-results/a0.5-g0.1-e0.75-approximateqlearning
 create mode 100644 qlearning-results/a0.5-g0.1-e0.75-qlearning
 create mode 100644 qlearning-results/a0.5-g0.1-e0.9-approximateqlearning
 create mode 100644 qlearning-results/a0.5-g0.1-e0.9-qlearning
 create mode 100644 qlearning-results/a0.5-g0.5-e0.1-approximateqlearning
 create mode 100644 qlearning-results/a0.5-g0.5-e0.1-qlearning
 create mode 100644 qlearning-results/a0.5-g0.5-e0.25-approximateqlearning
 create mode 100644 qlearning-results/a0.5-g0.5-e0.25-qlearning
 create mode 100644 qlearning-results/a0.5-g0.5-e0.5-approximateqlearning
 create mode 100644 qlearning-results/a0.5-g0.5-e0.5-qlearning
 create mode 100644 qlearning-results/a0.5-g0.5-e0.75-approximateqlearning
 create mode 100644 qlearning-results/a0.5-g0.5-e0.75-qlearning
 create mode 100644 qlearning-results/a0.5-g0.5-e0.9-approximateqlearning
 create mode 100644 qlearning-results/a0.5-g0.5-e0.9-qlearning
 create mode 100644 qlearning-results/a0.5-g0.9-e0.1-approximateqlearning
 create mode 100644 qlearning-results/a0.5-g0.9-e0.1-qlearning
 create mode 100644 qlearning-results/a0.5-g0.9-e0.25-approximateqlearning
 create mode 100644 qlearning-results/a0.5-g0.9-e0.25-qlearning
 create mode 100644 qlearning-results/a0.5-g0.9-e0.5-approximateqlearning
 create mode 100644 qlearning-results/a0.5-g0.9-e0.5-qlearning
 create mode 100644 qlearning-results/a0.5-g0.9-e0.75-approximateqlearning
 create mode 100644 qlearning-results/a0.5-g0.9-e0.75-qlearning
 create mode 100644 qlearning-results/a0.5-g0.9-e0.9-approximateqlearning
 create mode 100644 qlearning-results/a0.5-g0.9-e0.9-qlearning
 create mode 100644 qlearning-results/a0.5-g1.0-e0.1-approximateqlearning
 create mode 100644 qlearning-results/a0.5-g1.0-e0.1-qlearning
 create mode 100644 qlearning-results/a0.5-g1.0-e0.25-approximateqlearning
 create mode 100644 qlearning-results/a0.5-g1.0-e0.25-qlearning
 create mode 100644 qlearning-results/a0.5-g1.0-e0.5-approximateqlearning
 create mode 100644 qlearning-results/a0.5-g1.0-e0.5-qlearning
 create mode 100644 qlearning-results/a0.5-g1.0-e0.75-approximateqlearning
 create mode 100644 qlearning-results/a0.5-g1.0-e0.75-qlearning
 create mode 100644 qlearning-results/a0.5-g1.0-e0.9-approximateqlearning
 create mode 100644 qlearning-results/a0.5-g1.0-e0.9-qlearning
 create mode 100644 qlearning-results/a0.7-g0.1-e0.1-approximateqlearning
 create mode 100644 qlearning-results/a0.7-g0.1-e0.1-qlearning
 create mode 100644 qlearning-results/a0.7-g0.1-e0.25-approximateqlearning
 create mode 100644 qlearning-results/a0.7-g0.1-e0.25-qlearning
 create mode 100644 qlearning-results/a0.7-g0.1-e0.5-approximateqlearning
 create mode 100644 qlearning-results/a0.7-g0.1-e0.5-qlearning
 create mode 100644 qlearning-results/a0.7-g0.1-e0.75-approximateqlearning
 create mode 100644 qlearning-results/a0.7-g0.1-e0.75-qlearning
 create mode 100644 qlearning-results/a0.7-g0.1-e0.9-approximateqlearning
 create mode 100644 qlearning-results/a0.7-g0.1-e0.9-qlearning
 create mode 100644 qlearning-results/a0.7-g0.5-e0.1-approximateqlearning
 create mode 100644 qlearning-results/a0.7-g0.5-e0.1-qlearning
 create mode 100644 qlearning-results/a0.7-g0.5-e0.25-approximateqlearning
 create mode 100644 qlearning-results/a0.7-g0.5-e0.25-qlearning
 create mode 100644 qlearning-results/a0.7-g0.5-e0.5-approximateqlearning
 create mode 100644 qlearning-results/a0.7-g0.5-e0.5-qlearning
 create mode 100644 qlearning-results/a0.7-g0.5-e0.75-approximateqlearning
 create mode 100644 qlearning-results/a0.7-g0.5-e0.75-qlearning
 create mode 100644 qlearning-results/a0.7-g0.5-e0.9-approximateqlearning
 create mode 100644 qlearning-results/a0.7-g0.5-e0.9-qlearning
 create mode 100644 qlearning-results/a0.7-g0.9-e0.1-approximateqlearning
 create mode 100644 qlearning-results/a0.7-g0.9-e0.1-qlearning
 create mode 100644 qlearning-results/a0.7-g0.9-e0.25-approximateqlearning
 create mode 100644 qlearning-results/a0.7-g0.9-e0.25-qlearning
 create mode 100644 qlearning-results/a0.7-g0.9-e0.5-approximateqlearning
 create mode 100644 qlearning-results/a0.7-g0.9-e0.5-qlearning
 create mode 100644 qlearning-results/a0.7-g0.9-e0.75-approximateqlearning
 create mode 100644 qlearning-results/a0.7-g0.9-e0.75-qlearning
 create mode 100644 qlearning-results/a0.7-g0.9-e0.9-approximateqlearning
 create mode 100644 qlearning-results/a0.7-g0.9-e0.9-qlearning
 create mode 100644 qlearning-results/a0.7-g1.0-e0.1-approximateqlearning
 create mode 100644 qlearning-results/a0.7-g1.0-e0.1-qlearning
 create mode 100644 qlearning-results/a0.7-g1.0-e0.25-approximateqlearning
 create mode 100644 qlearning-results/a0.7-g1.0-e0.25-qlearning
 create mode 100644 qlearning-results/a0.7-g1.0-e0.5-approximateqlearning
 create mode 100644 qlearning-results/a0.7-g1.0-e0.5-qlearning
 create mode 100644 qlearning-results/a0.7-g1.0-e0.75-approximateqlearning
 create mode 100644 qlearning-results/a0.7-g1.0-e0.75-qlearning
 create mode 100644 qlearning-results/a0.7-g1.0-e0.9-approximateqlearning
 create mode 100644 qlearning-results/a0.7-g1.0-e0.9-qlearning
 create mode 100644 qlearning-results/a0.9-g0.1-e0.1-approximateqlearning
 create mode 100644 qlearning-results/a0.9-g0.1-e0.1-qlearning
 create mode 100644 qlearning-results/a0.9-g0.1-e0.25-approximateqlearning
 create mode 100644 qlearning-results/a0.9-g0.1-e0.25-qlearning
 create mode 100644 qlearning-results/a0.9-g0.1-e0.5-approximateqlearning
 create mode 100644 qlearning-results/a0.9-g0.1-e0.5-qlearning
 create mode 100644 qlearning-results/a0.9-g0.1-e0.75-approximateqlearning
 create mode 100644 qlearning-results/a0.9-g0.1-e0.75-qlearning
 create mode 100644 qlearning-results/a0.9-g0.1-e0.9-approximateqlearning
 create mode 100644 qlearning-results/a0.9-g0.1-e0.9-qlearning
 create mode 100644 qlearning-results/a0.9-g0.5-e0.1-approximateqlearning
 create mode 100644 qlearning-results/a0.9-g0.5-e0.1-qlearning
 create mode 100644 qlearning-results/a0.9-g0.5-e0.25-approximateqlearning
 create mode 100644 qlearning-results/a0.9-g0.5-e0.25-qlearning
 create mode 100644 qlearning-results/a0.9-g0.5-e0.5-approximateqlearning
 create mode 100644 qlearning-results/a0.9-g0.5-e0.5-qlearning
 create mode 100644 qlearning-results/a0.9-g0.5-e0.75-approximateqlearning
 create mode 100644 qlearning-results/a0.9-g0.5-e0.75-qlearning
 create mode 100644 qlearning-results/a0.9-g0.5-e0.9-approximateqlearning
 create mode 100644 qlearning-results/a0.9-g0.5-e0.9-qlearning
 create mode 100644 qlearning-results/a0.9-g0.9-e0.1-approximateqlearning
 create mode 100644 qlearning-results/a0.9-g0.9-e0.1-qlearning
 create mode 100644 qlearning-results/a0.9-g0.9-e0.25-approximateqlearning
 create mode 100644 qlearning-results/a0.9-g0.9-e0.25-qlearning
 create mode 100644 qlearning-results/a0.9-g0.9-e0.5-approximateqlearning
 create mode 100644 qlearning-results/a0.9-g0.9-e0.5-qlearning
 create mode 100644 qlearning-results/a0.9-g0.9-e0.75-approximateqlearning
 create mode 100644 qlearning-results/a0.9-g0.9-e0.75-qlearning
 create mode 100644 qlearning-results/a0.9-g0.9-e0.9-approximateqlearning
 create mode 100644 qlearning-results/a0.9-g0.9-e0.9-qlearning
 create mode 100644 qlearning-results/a0.9-g1.0-e0.1-approximateqlearning
 create mode 100644 qlearning-results/a0.9-g1.0-e0.1-qlearning
 create mode 100644 qlearning-results/a0.9-g1.0-e0.25-approximateqlearning
 create mode 100644 qlearning-results/a0.9-g1.0-e0.25-qlearning
 create mode 100644 qlearning-results/a0.9-g1.0-e0.5-approximateqlearning
 create mode 100644 qlearning-results/a0.9-g1.0-e0.5-qlearning
 create mode 100644 qlearning-results/a0.9-g1.0-e0.75-approximateqlearning
 create mode 100644 qlearning-results/a0.9-g1.0-e0.75-qlearning
 create mode 100644 qlearning-results/a0.9-g1.0-e0.9-approximateqlearning
 create mode 100644 qlearning-results/a0.9-g1.0-e0.9-qlearning
 create mode 100755 qlearning-results/test
 rename qlearning-results.tar => qlearning-results/tetris (93%)
 mode change 100644 => 100755

diff --git a/qlearning-results/a0.1-g0.1-e0.1-approximateqlearning b/qlearning-results/a0.1-g0.1-e0.1-approximateqlearning
new file mode 100644
index 0000000..8098db4
--- /dev/null
+++ b/qlearning-results/a0.1-g0.1-e0.1-approximateqlearning
@@ -0,0 +1,30 @@
+2020-04-20 19:53:51,825 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 0.1, exploration_rate = 0.1
+202.9900000000003
+203.45950000000053
+203.8405000000007
+204.57150000000019
+204.51700000000048
+205.23050000000032
+204.14600000000027
+202.9190000000006
+205.70500000000007
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:54:14,098 INFO [tetris] Final score: 206
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:54:14,306 INFO [tetris] Final score: 195
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:54:14,529 INFO [tetris] Final score: 208
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:54:14,705 INFO [tetris] Final score: 202
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:54:14,898 INFO [tetris] Final score: 210
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:54:15,090 INFO [tetris] Final score: 204
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:54:15,329 INFO [tetris] Final score: 221
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:54:15,537 INFO [tetris] Final score: 216
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-20 19:54:15,762 INFO [tetris] Final score: 200
+Lost due to: LockOut
+2020-04-20 19:54:15,969 INFO [tetris] Final score: 212
diff --git a/qlearning-results/a0.1-g0.1-e0.1-qlearning b/qlearning-results/a0.1-g0.1-e0.1-qlearning
new file mode 100644
index 0000000..c313de8
--- /dev/null
+++ b/qlearning-results/a0.1-g0.1-e0.1-qlearning
@@ -0,0 +1,30 @@
+2020-04-20 22:47:13,417 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 0.1, exploration_rate = 0.1
+220.95810000000168
+218.19660000000184
+216.98390000000128
+216.32795000000155
+216.2846500000026
+215.670950000002
+215.56090000000168
+215.49235000000138
+214.88230000000195
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 22:55:39,974 INFO [tetris] Final score: 203
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 22:55:40,517 INFO [tetris] Final score: 182
+Lost due to: LockOut
+2020-04-20 22:55:40,757 INFO [tetris] Final score: 192
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 22:55:42,069 INFO [tetris] Final score: 218
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-20 22:55:42,438 INFO [tetris] Final score: 200
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 22:55:43,814 INFO [tetris] Final score: 228
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-20 22:55:45,093 INFO [tetris] Final score: 214
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-20 22:55:46,614 INFO [tetris] Final score: 232
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 22:55:47,941 INFO [tetris] Final score: 168
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 22:55:49,078 INFO [tetris] Final score: 200
diff --git a/qlearning-results/a0.1-g0.1-e0.25-approximateqlearning b/qlearning-results/a0.1-g0.1-e0.25-approximateqlearning
new file mode 100644
index 0000000..8fcfb38
--- /dev/null
+++ b/qlearning-results/a0.1-g0.1-e0.25-approximateqlearning
@@ -0,0 +1,30 @@
+2020-04-20 19:54:16,016 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 0.1, exploration_rate = 0.25
+199.9395000000003
+197.7290000000002
+198.4160000000001
+198.15649999999977
+198.8634999999999
+197.55950000000018
+197.8339999999998
+197.34150000000017
+197.9950000000006
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:54:39,280 INFO [tetris] Final score: 188
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:54:39,584 INFO [tetris] Final score: 193
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-20 19:54:40,432 INFO [tetris] Final score: 189
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:54:40,640 INFO [tetris] Final score: 158
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:54:41,008 INFO [tetris] Final score: 182
+Lost due to: BlockOut(Position { x: 3, y: 20 })
+2020-04-20 19:54:41,216 INFO [tetris] Final score: 217
+Lost due to: BlockOut(Position { x: 4, y: 19 })
+2020-04-20 19:54:41,568 INFO [tetris] Final score: 178
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:54:41,840 INFO [tetris] Final score: 172
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:54:42,112 INFO [tetris] Final score: 200
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:54:42,304 INFO [tetris] Final score: 208
diff --git a/qlearning-results/a0.1-g0.1-e0.25-qlearning b/qlearning-results/a0.1-g0.1-e0.25-qlearning
new file mode 100644
index 0000000..b9ef54f
--- /dev/null
+++ b/qlearning-results/a0.1-g0.1-e0.25-qlearning
@@ -0,0 +1,30 @@
+2020-04-20 22:56:02,392 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 0.1, exploration_rate = 0.25
+222.2459000000013
+219.66315000000242
+218.52945000000193
+217.64080000000234
+217.4608
+217.40135000000208
+217.14465000000126
+217.1419500000022
+216.6491000000021
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 23:05:07,085 INFO [tetris] Final score: 233
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 23:05:07,565 INFO [tetris] Final score: 220
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 23:05:08,444 INFO [tetris] Final score: 205
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 23:05:08,732 INFO [tetris] Final score: 213
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 23:05:09,453 INFO [tetris] Final score: 206
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-20 23:05:10,236 INFO [tetris] Final score: 187
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 23:05:10,989 INFO [tetris] Final score: 237
+Lost due to: BlockOut(Position { x: 3, y: 20 })
+2020-04-20 23:05:12,061 INFO [tetris] Final score: 267
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 23:05:12,957 INFO [tetris] Final score: 203
+Lost due to: BlockOut(Position { x: 4, y: 19 })
+2020-04-20 23:05:14,397 INFO [tetris] Final score: 218
diff --git a/qlearning-results/a0.1-g0.1-e0.5-approximateqlearning b/qlearning-results/a0.1-g0.1-e0.5-approximateqlearning
new file mode 100644
index 0000000..4481ea7
--- /dev/null
+++ b/qlearning-results/a0.1-g0.1-e0.5-approximateqlearning
@@ -0,0 +1,30 @@
+2020-04-20 19:54:42,317 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 0.1, exploration_rate = 0.5
+196.97650000000007
+193.87299999999973
+192.38500000000002
+192.71700000000013
+193.4105000000001
+192.64049999999997
+192.89299999999992
+193.16400000000004
+193.17149999999995
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-20 19:55:07,820 INFO [tetris] Final score: 202
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:55:08,397 INFO [tetris] Final score: 231
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:55:08,764 INFO [tetris] Final score: 142
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-20 19:55:09,149 INFO [tetris] Final score: 168
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:55:10,188 INFO [tetris] Final score: 231
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:55:10,557 INFO [tetris] Final score: 183
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:55:10,956 INFO [tetris] Final score: 168
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:55:11,356 INFO [tetris] Final score: 186
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-20 19:55:11,773 INFO [tetris] Final score: 234
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:55:12,044 INFO [tetris] Final score: 166
diff --git a/qlearning-results/a0.1-g0.1-e0.5-qlearning b/qlearning-results/a0.1-g0.1-e0.5-qlearning
new file mode 100644
index 0000000..9a7d92b
--- /dev/null
+++ b/qlearning-results/a0.1-g0.1-e0.5-qlearning
@@ -0,0 +1,30 @@
+2020-04-20 23:05:31,376 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 0.1, exploration_rate = 0.5
+221.98790000000076
+220.2209500000016
+219.2323000000003
+218.9858000000025
+218.80650000000216
+218.30300000000062
+218.24995000000052
+217.698150000002
+217.9902500000012
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 23:14:19,306 INFO [tetris] Final score: 196
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 23:14:19,977 INFO [tetris] Final score: 200
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 23:14:20,889 INFO [tetris] Final score: 223
+Lost due to: BlockOut(Position { x: 4, y: 19 })
+2020-04-20 23:14:21,529 INFO [tetris] Final score: 187
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 23:14:22,538 INFO [tetris] Final score: 225
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 23:14:23,673 INFO [tetris] Final score: 210
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 23:14:24,457 INFO [tetris] Final score: 177
+Lost due to: BlockOut(Position { x: 4, y: 19 })
+2020-04-20 23:14:25,113 INFO [tetris] Final score: 175
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 23:14:25,786 INFO [tetris] Final score: 175
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 23:14:26,777 INFO [tetris] Final score: 178
diff --git a/qlearning-results/a0.1-g0.1-e0.75-approximateqlearning b/qlearning-results/a0.1-g0.1-e0.75-approximateqlearning
new file mode 100644
index 0000000..7366047
--- /dev/null
+++ b/qlearning-results/a0.1-g0.1-e0.75-approximateqlearning
@@ -0,0 +1,30 @@
+2020-04-20 19:55:12,057 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 0.1, exploration_rate = 0.75
+199.317
+198.07700000000025
+198.324
+198.93750000000006
+199.0355000000004
+199.3629999999999
+199.24750000000017
+198.76299999999966
+198.59600000000023
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-20 19:55:44,123 INFO [tetris] Final score: 171
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-20 19:55:44,636 INFO [tetris] Final score: 118
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-20 19:55:45,771 INFO [tetris] Final score: 167
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:55:46,795 INFO [tetris] Final score: 245
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:55:47,532 INFO [tetris] Final score: 212
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:55:48,252 INFO [tetris] Final score: 183
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:55:48,892 INFO [tetris] Final score: 214
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:55:49,291 INFO [tetris] Final score: 169
+Lost due to: BlockOut(Position { x: 3, y: 20 })
+2020-04-20 19:55:50,076 INFO [tetris] Final score: 192
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:55:50,860 INFO [tetris] Final score: 212
diff --git a/qlearning-results/a0.1-g0.1-e0.75-qlearning b/qlearning-results/a0.1-g0.1-e0.75-qlearning
new file mode 100644
index 0000000..fbd2d45
--- /dev/null
+++ b/qlearning-results/a0.1-g0.1-e0.75-qlearning
@@ -0,0 +1,30 @@
+2020-04-20 23:20:30,784 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 0.1, exploration_rate = 0.75
+222.8502999999997
+221.56735000000256
+220.65415000000044
+219.97580000000033
+220.14905000000118
+219.751050000002
+219.8127000000007
+219.5739499999999
+219.68790000000163
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-20 23:27:54,362 INFO [tetris] Final score: 218
+Lost due to: BlockOut(Position { x: 3, y: 20 })
+2020-04-20 23:27:55,294 INFO [tetris] Final score: 171
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-20 23:27:56,621 INFO [tetris] Final score: 216
+Lost due to: BlockOut(Position { x: 5, y: 19 })
+2020-04-20 23:27:57,326 INFO [tetris] Final score: 160
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 23:27:58,494 INFO [tetris] Final score: 208
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 23:27:59,534 INFO [tetris] Final score: 171
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-20 23:28:00,414 INFO [tetris] Final score: 193
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 23:28:01,518 INFO [tetris] Final score: 257
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 23:28:03,085 INFO [tetris] Final score: 225
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 23:28:04,637 INFO [tetris] Final score: 236
diff --git a/qlearning-results/a0.1-g0.1-e0.9-approximateqlearning b/qlearning-results/a0.1-g0.1-e0.9-approximateqlearning
new file mode 100644
index 0000000..a17230c
--- /dev/null
+++ b/qlearning-results/a0.1-g0.1-e0.9-approximateqlearning
@@ -0,0 +1,30 @@
+2020-04-20 19:55:50,868 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 0.1, exploration_rate = 0.9
+206.9339999999998
+208.44249999999934
+207.38549999999978
+207.54550000000023
+208.83049999999997
+207.53250000000017
+208.2755000000005
+209.32850000000042
+209.90800000000064
+Lost due to: BlockOut(Position { x: 3, y: 20 })
+2020-04-20 19:56:31,530 INFO [tetris] Final score: 199
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:56:32,745 INFO [tetris] Final score: 194
+Lost due to: LockOut
+2020-04-20 19:56:33,993 INFO [tetris] Final score: 187
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:56:35,017 INFO [tetris] Final score: 190
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:56:35,689 INFO [tetris] Final score: 168
+Lost due to: LockOut
+2020-04-20 19:56:37,497 INFO [tetris] Final score: 244
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:56:38,490 INFO [tetris] Final score: 183
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:56:39,177 INFO [tetris] Final score: 170
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:56:40,073 INFO [tetris] Final score: 217
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:56:40,698 INFO [tetris] Final score: 224
diff --git a/qlearning-results/a0.1-g0.1-e0.9-qlearning b/qlearning-results/a0.1-g0.1-e0.9-qlearning
new file mode 100644
index 0000000..1edab43
--- /dev/null
+++ b/qlearning-results/a0.1-g0.1-e0.9-qlearning
@@ -0,0 +1,30 @@
+2020-04-20 23:28:22,227 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 0.1, exploration_rate = 0.9
+223.1952000000007
+222.9115500000005
+222.5591000000004
+222.30110000000096
+221.7994500000013
+222.20040000000213
+221.98400000000007
+221.9154000000008
+222.00440000000148
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 23:35:25,482 INFO [tetris] Final score: 172
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 23:35:26,966 INFO [tetris] Final score: 181
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 23:35:27,911 INFO [tetris] Final score: 154
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 23:35:29,047 INFO [tetris] Final score: 216
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 23:35:30,151 INFO [tetris] Final score: 206
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 23:35:32,390 INFO [tetris] Final score: 286
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-20 23:35:33,254 INFO [tetris] Final score: 215
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 23:35:36,455 INFO [tetris] Final score: 388
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 23:35:37,463 INFO [tetris] Final score: 208
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 23:35:39,511 INFO [tetris] Final score: 282
diff --git a/qlearning-results/a0.1-g0.5-e0.1-approximateqlearning b/qlearning-results/a0.1-g0.5-e0.1-approximateqlearning
new file mode 100644
index 0000000..043bf78
--- /dev/null
+++ b/qlearning-results/a0.1-g0.5-e0.1-approximateqlearning
@@ -0,0 +1,30 @@
+2020-04-20 19:51:03,272 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 0.5, exploration_rate = 0.1
+202.91550000000058
+201.90000000000043
+204.79149999999984
+203.40600000000023
+204.6635
+202.65850000000034
+204.23150000000106
+207.42250000000072
+203.14750000000012
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:51:24,270 INFO [tetris] Final score: 192
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-20 19:51:24,494 INFO [tetris] Final score: 232
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:51:24,686 INFO [tetris] Final score: 220
+Lost due to: BlockOut(Position { x: 3, y: 20 })
+2020-04-20 19:51:24,862 INFO [tetris] Final score: 210
+Lost due to: BlockOut(Position { x: 3, y: 20 })
+2020-04-20 19:51:25,039 INFO [tetris] Final score: 210
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:51:25,231 INFO [tetris] Final score: 218
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-20 19:51:25,423 INFO [tetris] Final score: 197
+Lost due to: LockOut
+2020-04-20 19:51:25,615 INFO [tetris] Final score: 210
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:51:25,823 INFO [tetris] Final score: 220
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:51:26,015 INFO [tetris] Final score: 210
diff --git a/qlearning-results/a0.1-g0.5-e0.1-qlearning b/qlearning-results/a0.1-g0.5-e0.1-qlearning
new file mode 100644
index 0000000..d7227bb
--- /dev/null
+++ b/qlearning-results/a0.1-g0.5-e0.1-qlearning
@@ -0,0 +1,30 @@
+2020-04-20 22:03:08,902 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 0.5, exploration_rate = 0.1
+218.95680000000175
+216.40600000000256
+215.8475000000029
+216.2365500000022
+215.53205000000244
+215.29070000000203
+215.55760000000194
+215.17585000000147
+215.32435000000112
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 22:11:39,795 INFO [tetris] Final score: 242
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 22:11:41,267 INFO [tetris] Final score: 244
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 22:11:41,747 INFO [tetris] Final score: 196
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 22:11:42,706 INFO [tetris] Final score: 195
+Lost due to: BlockOut(Position { x: 3, y: 20 })
+2020-04-20 22:11:43,955 INFO [tetris] Final score: 200
+Lost due to: BlockOut(Position { x: 3, y: 20 })
+2020-04-20 22:11:44,387 INFO [tetris] Final score: 207
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-20 22:11:44,803 INFO [tetris] Final score: 204
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 22:11:45,410 INFO [tetris] Final score: 194
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 22:11:45,811 INFO [tetris] Final score: 200
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 22:11:46,515 INFO [tetris] Final score: 206
diff --git a/qlearning-results/a0.1-g0.5-e0.25-approximateqlearning b/qlearning-results/a0.1-g0.5-e0.25-approximateqlearning
new file mode 100644
index 0000000..8220391
--- /dev/null
+++ b/qlearning-results/a0.1-g0.5-e0.25-approximateqlearning
@@ -0,0 +1,30 @@
+2020-04-20 19:51:26,021 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 0.5, exploration_rate = 0.25
+200.25600000000043
+199.62100000000024
+196.92050000000043
+199.74200000000064
+199.07450000000014
+199.32550000000037
+199.4075000000002
+197.92750000000015
+197.7249999999997
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:51:48,554 INFO [tetris] Final score: 171
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:51:49,834 INFO [tetris] Final score: 248
+Lost due to: BlockOut(Position { x: 3, y: 20 })
+2020-04-20 19:51:50,986 INFO [tetris] Final score: 212
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:51:52,058 INFO [tetris] Final score: 136
+Lost due to: BlockOut(Position { x: 4, y: 19 })
+2020-04-20 19:51:52,474 INFO [tetris] Final score: 197
+Lost due to: BlockOut(Position { x: 3, y: 20 })
+2020-04-20 19:51:53,802 INFO [tetris] Final score: 216
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:51:55,722 INFO [tetris] Final score: 239
+Lost due to: BlockOut(Position { x: 3, y: 19 })
+2020-04-20 19:51:56,202 INFO [tetris] Final score: 168
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:51:56,809 INFO [tetris] Final score: 198
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-20 19:51:57,801 INFO [tetris] Final score: 182
diff --git a/qlearning-results/a0.1-g0.5-e0.25-qlearning b/qlearning-results/a0.1-g0.5-e0.25-qlearning
new file mode 100644
index 0000000..c0bee5d
--- /dev/null
+++ b/qlearning-results/a0.1-g0.5-e0.25-qlearning
@@ -0,0 +1,30 @@
+2020-04-20 22:11:59,702 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 0.5, exploration_rate = 0.25
+221.37790000000103
+219.74895000000214
+218.8575500000007
+218.10520000000105
+217.6736000000012
+217.39030000000125
+217.24575000000306
+217.0567500000027
+216.70315000000298
+Lost due to: BlockOut(Position { x: 3, y: 20 })
+2020-04-20 22:21:03,153 INFO [tetris] Final score: 185
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 22:21:04,064 INFO [tetris] Final score: 219
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 22:21:04,993 INFO [tetris] Final score: 217
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 22:21:06,465 INFO [tetris] Final score: 263
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 22:21:07,361 INFO [tetris] Final score: 228
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-20 22:21:07,937 INFO [tetris] Final score: 210
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 22:21:09,217 INFO [tetris] Final score: 219
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 22:21:10,177 INFO [tetris] Final score: 226
+Lost due to: BlockOut(Position { x: 5, y: 19 })
+2020-04-20 22:21:11,281 INFO [tetris] Final score: 193
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-20 22:21:12,033 INFO [tetris] Final score: 224
diff --git a/qlearning-results/a0.1-g0.5-e0.5-approximateqlearning b/qlearning-results/a0.1-g0.5-e0.5-approximateqlearning
new file mode 100644
index 0000000..39306e2
--- /dev/null
+++ b/qlearning-results/a0.1-g0.5-e0.5-approximateqlearning
@@ -0,0 +1,30 @@
+2020-04-20 19:51:57,814 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 0.5, exploration_rate = 0.5
+197.60850000000073
+194.93899999999988
+192.84999999999997
+194.20399999999952
+193.8585
+193.10500000000013
+192.72149999999954
+193.21349999999984
+193.71550000000025
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:52:22,269 INFO [tetris] Final score: 226
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:52:22,605 INFO [tetris] Final score: 234
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:52:22,814 INFO [tetris] Final score: 144
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:52:23,341 INFO [tetris] Final score: 151
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:52:23,757 INFO [tetris] Final score: 139
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:52:24,125 INFO [tetris] Final score: 202
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:52:24,782 INFO [tetris] Final score: 218
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:52:25,102 INFO [tetris] Final score: 213
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-20 19:52:25,518 INFO [tetris] Final score: 210
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:52:26,333 INFO [tetris] Final score: 230
diff --git a/qlearning-results/a0.1-g0.5-e0.5-qlearning b/qlearning-results/a0.1-g0.5-e0.5-qlearning
new file mode 100644
index 0000000..1d71e64
--- /dev/null
+++ b/qlearning-results/a0.1-g0.5-e0.5-qlearning
@@ -0,0 +1,30 @@
+2020-04-20 22:21:26,605 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 0.5, exploration_rate = 0.5
+222.22215000000207
+220.20200000000062
+220.06675000000146
+219.06850000000324
+218.8581500000011
+218.28170000000168
+218.65165000000067
+218.28869999999947
+217.98975000000075
+Lost due to: BlockOut(Position { x: 3, y: 19 })
+2020-04-20 22:30:10,316 INFO [tetris] Final score: 243
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 22:30:11,339 INFO [tetris] Final score: 202
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 22:30:12,492 INFO [tetris] Final score: 183
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 22:30:13,420 INFO [tetris] Final score: 194
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 22:30:15,260 INFO [tetris] Final score: 253
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 22:30:16,060 INFO [tetris] Final score: 203
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 22:30:16,508 INFO [tetris] Final score: 227
+Lost due to: BlockOut(Position { x: 5, y: 19 })
+2020-04-20 22:30:19,084 INFO [tetris] Final score: 321
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 22:30:20,124 INFO [tetris] Final score: 223
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 22:30:20,956 INFO [tetris] Final score: 207
diff --git a/qlearning-results/a0.1-g0.5-e0.75-approximateqlearning b/qlearning-results/a0.1-g0.5-e0.75-approximateqlearning
new file mode 100644
index 0000000..6ce2f3e
--- /dev/null
+++ b/qlearning-results/a0.1-g0.5-e0.75-approximateqlearning
@@ -0,0 +1,30 @@
+2020-04-20 19:52:26,341 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 0.5, exploration_rate = 0.75
+199.8565000000001
+198.30050000000028
+198.6645000000003
+199.38399999999982
+199.6905000000004
+198.73249999999976
+198.10299999999992
+198.96399999999988
+199.12200000000013
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:52:57,169 INFO [tetris] Final score: 189
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-20 19:52:57,856 INFO [tetris] Final score: 169
+Lost due to: BlockOut(Position { x: 3, y: 20 })
+2020-04-20 19:52:58,656 INFO [tetris] Final score: 168
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-20 19:52:59,025 INFO [tetris] Final score: 205
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:52:59,488 INFO [tetris] Final score: 166
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:52:59,840 INFO [tetris] Final score: 164
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-20 19:53:01,024 INFO [tetris] Final score: 195
+Lost due to: BlockOut(Position { x: 5, y: 19 })
+2020-04-20 19:53:01,617 INFO [tetris] Final score: 197
+Lost due to: BlockOut(Position { x: 3, y: 20 })
+2020-04-20 19:53:02,049 INFO [tetris] Final score: 177
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-20 19:53:02,593 INFO [tetris] Final score: 180
diff --git a/qlearning-results/a0.1-g0.5-e0.75-qlearning b/qlearning-results/a0.1-g0.5-e0.75-qlearning
new file mode 100644
index 0000000..19ae04c
--- /dev/null
+++ b/qlearning-results/a0.1-g0.5-e0.75-qlearning
@@ -0,0 +1,30 @@
+2020-04-20 22:30:37,639 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 0.5, exploration_rate = 0.75
+222.70830000000194
+221.62445000000056
+220.67714999999978
+220.32700000000204
+220.72685000000172
+220.04535000000004
+219.66020000000054
+219.8325000000021
+219.15890000000252
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 22:38:38,738 INFO [tetris] Final score: 221
+Lost due to: BlockOut(Position { x: 4, y: 19 })
+2020-04-20 22:38:40,721 INFO [tetris] Final score: 293
+Lost due to: BlockOut(Position { x: 3, y: 19 })
+2020-04-20 22:38:42,370 INFO [tetris] Final score: 239
+Lost due to: BlockOut(Position { x: 4, y: 19 })
+2020-04-20 22:38:43,922 INFO [tetris] Final score: 263
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-20 22:38:46,322 INFO [tetris] Final score: 239
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 22:38:47,393 INFO [tetris] Final score: 215
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 22:38:48,002 INFO [tetris] Final score: 184
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 22:38:49,170 INFO [tetris] Final score: 163
+Lost due to: BlockOut(Position { x: 4, y: 19 })
+2020-04-20 22:38:50,594 INFO [tetris] Final score: 221
+Lost due to: BlockOut(Position { x: 4, y: 19 })
+2020-04-20 22:38:51,346 INFO [tetris] Final score: 225
diff --git a/qlearning-results/a0.1-g0.5-e0.9-approximateqlearning b/qlearning-results/a0.1-g0.5-e0.9-approximateqlearning
new file mode 100644
index 0000000..2669e78
--- /dev/null
+++ b/qlearning-results/a0.1-g0.5-e0.9-approximateqlearning
@@ -0,0 +1,30 @@
+2020-04-20 19:53:02,605 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 0.5, exploration_rate = 0.9
+207.45600000000007
+207.617
+207.4425000000002
+208.28600000000034
+210.19349999999963
+208.85100000000028
+207.92550000000023
+209.53050000000053
+207.58600000000015
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:53:43,157 INFO [tetris] Final score: 181
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-20 19:53:43,925 INFO [tetris] Final score: 201
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-20 19:53:45,140 INFO [tetris] Final score: 168
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-20 19:53:45,797 INFO [tetris] Final score: 186
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:53:46,756 INFO [tetris] Final score: 177
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:53:48,197 INFO [tetris] Final score: 236
+Lost due to: BlockOut(Position { x: 3, y: 20 })
+2020-04-20 19:53:49,413 INFO [tetris] Final score: 212
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:53:50,101 INFO [tetris] Final score: 180
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-20 19:53:50,852 INFO [tetris] Final score: 187
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:53:51,813 INFO [tetris] Final score: 179
diff --git a/qlearning-results/a0.1-g0.5-e0.9-qlearning b/qlearning-results/a0.1-g0.5-e0.9-qlearning
new file mode 100644
index 0000000..a55862d
--- /dev/null
+++ b/qlearning-results/a0.1-g0.5-e0.9-qlearning
@@ -0,0 +1,30 @@
+2020-04-20 22:39:09,115 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 0.5, exploration_rate = 0.9
+223.29540000000083
+222.6900000000015
+222.5335000000025
+221.9808500000014
+221.26825000000136
+221.8926500000009
+222.22910000000022
+221.34840000000182
+221.69575000000344
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 22:46:43,359 INFO [tetris] Final score: 177
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 22:46:44,127 INFO [tetris] Final score: 145
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 22:46:45,151 INFO [tetris] Final score: 278
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 22:46:47,232 INFO [tetris] Final score: 210
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 22:46:48,767 INFO [tetris] Final score: 242
+Lost due to: BlockOut(Position { x: 3, y: 20 })
+2020-04-20 22:46:50,736 INFO [tetris] Final score: 245
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-20 22:46:51,455 INFO [tetris] Final score: 187
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-20 22:46:52,847 INFO [tetris] Final score: 225
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 22:46:54,031 INFO [tetris] Final score: 246
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 22:46:55,039 INFO [tetris] Final score: 197
diff --git a/qlearning-results/a0.1-g0.9-e0.1-approximateqlearning b/qlearning-results/a0.1-g0.9-e0.1-approximateqlearning
new file mode 100644
index 0000000..4b16379
--- /dev/null
+++ b/qlearning-results/a0.1-g0.9-e0.1-approximateqlearning
@@ -0,0 +1,30 @@
+2020-04-20 19:48:51,558 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 0.9, exploration_rate = 0.1
+202.6465000000004
+202.39400000000015
+202.71350000000058
+203.02800000000005
+203.03899999999987
+203.0555000000006
+202.93950000000052
+202.5405000000001
+202.92950000000036
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:49:04,774 INFO [tetris] Final score: 214
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:49:04,950 INFO [tetris] Final score: 206
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:49:05,191 INFO [tetris] Final score: 214
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:49:05,398 INFO [tetris] Final score: 208
+Lost due to: LockOut
+2020-04-20 19:49:05,590 INFO [tetris] Final score: 188
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:49:05,767 INFO [tetris] Final score: 200
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:49:05,942 INFO [tetris] Final score: 202
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:49:06,118 INFO [tetris] Final score: 200
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:49:06,310 INFO [tetris] Final score: 208
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:49:06,503 INFO [tetris] Final score: 200
diff --git a/qlearning-results/a0.1-g0.9-e0.1-qlearning b/qlearning-results/a0.1-g0.9-e0.1-qlearning
new file mode 100644
index 0000000..70cf2bc
--- /dev/null
+++ b/qlearning-results/a0.1-g0.9-e0.1-qlearning
@@ -0,0 +1,30 @@
+2020-04-20 20:55:09,252 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 0.9, exploration_rate = 0.1
+217.85775000000078
+214.23900000000074
+213.86090000000135
+212.71030000000212
+212.2356500000019
+211.60060000000047
+211.6340000000002
+211.5668500000007
+211.38930000000192
+Lost due to: BlockOut(Position { x: 4, y: 19 })
+2020-04-20 21:03:51,834 INFO [tetris] Final score: 174
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:03:52,618 INFO [tetris] Final score: 191
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-20 21:03:54,201 INFO [tetris] Final score: 237
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-20 21:03:55,625 INFO [tetris] Final score: 225
+Lost due to: BlockOut(Position { x: 3, y: 20 })
+2020-04-20 21:03:56,745 INFO [tetris] Final score: 220
+Lost due to: BlockOut(Position { x: 4, y: 19 })
+2020-04-20 21:03:58,217 INFO [tetris] Final score: 232
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:03:58,825 INFO [tetris] Final score: 174
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:03:59,337 INFO [tetris] Final score: 193
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:03:59,817 INFO [tetris] Final score: 173
+Lost due to: BlockOut(Position { x: 3, y: 20 })
+2020-04-20 21:04:00,506 INFO [tetris] Final score: 190
diff --git a/qlearning-results/a0.1-g0.9-e0.25-approximateqlearning b/qlearning-results/a0.1-g0.9-e0.25-approximateqlearning
new file mode 100644
index 0000000..3b9d3d1
--- /dev/null
+++ b/qlearning-results/a0.1-g0.9-e0.25-approximateqlearning
@@ -0,0 +1,30 @@
+2020-04-20 19:49:06,512 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 0.9, exploration_rate = 0.25
+200.03150000000065
+200.79300000000003
+200.47100000000017
+200.0220000000006
+199.55649999999997
+200.4180000000004
+201.05850000000044
+200.04650000000038
+200.06300000000036
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-20 19:49:20,634 INFO [tetris] Final score: 216
+Lost due to: BlockOut(Position { x: 5, y: 19 })
+2020-04-20 19:49:20,890 INFO [tetris] Final score: 194
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:49:21,130 INFO [tetris] Final score: 210
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:49:21,354 INFO [tetris] Final score: 206
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:49:21,594 INFO [tetris] Final score: 234
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:49:21,834 INFO [tetris] Final score: 240
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:49:22,042 INFO [tetris] Final score: 202
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-20 19:49:22,267 INFO [tetris] Final score: 196
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:49:22,490 INFO [tetris] Final score: 211
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:49:22,746 INFO [tetris] Final score: 187
diff --git a/qlearning-results/a0.1-g0.9-e0.25-qlearning b/qlearning-results/a0.1-g0.9-e0.25-qlearning
new file mode 100644
index 0000000..6b3fdbf
--- /dev/null
+++ b/qlearning-results/a0.1-g0.9-e0.25-qlearning
@@ -0,0 +1,30 @@
+2020-04-20 21:04:13,262 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 0.9, exploration_rate = 0.25
+222.82825000000219
+220.54200000000077
+219.28270000000148
+218.53440000000182
+218.82440000000096
+218.7121500000015
+217.93305000000066
+217.89815000000158
+217.86430000000198
+Lost due to: BlockOut(Position { x: 5, y: 19 })
+2020-04-20 21:13:24,753 INFO [tetris] Final score: 204
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-20 21:13:26,065 INFO [tetris] Final score: 198
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:13:26,720 INFO [tetris] Final score: 211
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:13:28,032 INFO [tetris] Final score: 227
+Lost due to: BlockOut(Position { x: 4, y: 19 })
+2020-04-20 21:13:28,432 INFO [tetris] Final score: 190
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:13:29,712 INFO [tetris] Final score: 224
+Lost due to: BlockOut(Position { x: 3, y: 20 })
+2020-04-20 21:13:30,880 INFO [tetris] Final score: 230
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:13:31,744 INFO [tetris] Final score: 247
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:13:33,776 INFO [tetris] Final score: 284
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:13:35,280 INFO [tetris] Final score: 227
diff --git a/qlearning-results/a0.1-g0.9-e0.5-approximateqlearning b/qlearning-results/a0.1-g0.9-e0.5-approximateqlearning
new file mode 100644
index 0000000..dab391a
--- /dev/null
+++ b/qlearning-results/a0.1-g0.9-e0.5-approximateqlearning
@@ -0,0 +1,30 @@
+2020-04-20 19:49:22,757 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 0.9, exploration_rate = 0.5
+197.83150000000063
+197.5155
+196.53200000000007
+197.7870000000006
+197.4235000000002
+198.2155000000005
+198.35950000000005
+197.71749999999992
+197.63600000000034
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:49:39,931 INFO [tetris] Final score: 217
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:49:40,251 INFO [tetris] Final score: 162
+Lost due to: BlockOut(Position { x: 4, y: 19 })
+2020-04-20 19:49:40,603 INFO [tetris] Final score: 193
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:49:40,907 INFO [tetris] Final score: 194
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:49:41,179 INFO [tetris] Final score: 190
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-20 19:49:41,435 INFO [tetris] Final score: 178
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:49:41,755 INFO [tetris] Final score: 189
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:49:42,011 INFO [tetris] Final score: 195
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:49:42,427 INFO [tetris] Final score: 218
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 19:49:43,307 INFO [tetris] Final score: 297
diff --git a/qlearning-results/a0.1-g0.9-e0.5-qlearning b/qlearning-results/a0.1-g0.9-e0.5-qlearning
new file mode 100644
index 0000000..cdc8056
--- /dev/null
+++ b/qlearning-results/a0.1-g0.9-e0.5-qlearning
@@ -0,0 +1,30 @@
+2020-04-20 21:13:52,160 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 0.9, exploration_rate = 0.5
+222.20530000000127
+221.69330000000247
+220.42010000000138
+220.16220000000072
+219.9176000000008
+219.27640000000164
+219.26185000000027
+219.42875000000203
+219.08700000000275
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:22:36,915 INFO [tetris] Final score: 187
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:22:38,339 INFO [tetris] Final score: 284
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:22:38,690 INFO [tetris] Final score: 192
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-20 21:22:39,362 INFO [tetris] Final score: 233
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:22:41,331 INFO [tetris] Final score: 278
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:22:42,179 INFO [tetris] Final score: 208
+Lost due to: BlockOut(Position { x: 3, y: 20 })
+2020-04-20 21:22:43,074 INFO [tetris] Final score: 217
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:22:45,251 INFO [tetris] Final score: 246
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:22:46,883 INFO [tetris] Final score: 238
+Lost due to: BlockOut(Position { x: 4, y: 19 })
+2020-04-20 21:22:47,715 INFO [tetris] Final score: 233
diff --git a/qlearning-results/a0.1-g0.9-e0.75-approximateqlearning b/qlearning-results/a0.1-g0.9-e0.75-approximateqlearning
new file mode 100644
index 0000000..81a135c
---
/dev/null +++ b/qlearning-results/a0.1-g0.9-e0.75-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 19:49:43,324 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 0.9, exploration_rate = 0.75 +200.32500000000013 +200.8425000000005 +200.43950000000055 +200.54100000000017 +200.49400000000034 +200.66400000000002 +200.5815000000002 +199.424 +200.4714999999997 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:50:09,866 INFO [tetris] Final score: 181 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:50:10,250 INFO [tetris] Final score: 152 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:50:11,114 INFO [tetris] Final score: 243 +Lost due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-20 19:50:12,122 INFO [tetris] Final score: 223 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 19:50:12,730 INFO [tetris] Final score: 176 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:50:13,482 INFO [tetris] Final score: 193 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:50:14,586 INFO [tetris] Final score: 214 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:50:15,610 INFO [tetris] Final score: 198 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 19:50:16,554 INFO [tetris] Final score: 190 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:50:17,419 INFO [tetris] Final score: 247 diff --git a/qlearning-results/a0.1-g0.9-e0.75-qlearning b/qlearning-results/a0.1-g0.9-e0.75-qlearning new file mode 100644 index 0000000..620c422 --- /dev/null +++ b/qlearning-results/a0.1-g0.9-e0.75-qlearning @@ -0,0 +1,30 @@ +2020-04-20 21:23:03,999 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 0.9, exploration_rate = 0.75 +223.42030000000008 +222.7879500000012 +221.70360000000295 +221.8445500000001 +221.79939999999996 +221.50765000000143 +220.82935000000086 +221.18140000000034 +221.43530000000243 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:31:09,656 INFO [tetris] Final score: 203 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:31:11,464 INFO [tetris] Final score: 281 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:31:12,808 INFO [tetris] Final score: 283 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 21:31:13,943 INFO [tetris] Final score: 216 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:31:14,920 INFO [tetris] Final score: 249 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 21:31:16,248 INFO [tetris] Final score: 156 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:31:18,392 INFO [tetris] Final score: 209 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:31:19,592 INFO [tetris] Final score: 214 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:31:20,391 INFO [tetris] Final score: 203 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:31:21,335 INFO [tetris] Final score: 225 diff --git a/qlearning-results/a0.1-g0.9-e0.9-approximateqlearning b/qlearning-results/a0.1-g0.9-e0.9-approximateqlearning new file mode 100644 index 0000000..4cfdc8e --- /dev/null +++ b/qlearning-results/a0.1-g0.9-e0.9-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 19:50:17,433 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 0.9, exploration_rate = 0.9 +208.73600000000016 +209.4225000000004 +209.70750000000032 +207.65349999999984 
+207.76400000000055 +208.6115000000001 +207.88149999999996 +207.8700000000006 +209.03950000000006 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:50:54,363 INFO [tetris] Final score: 171 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-20 19:50:55,243 INFO [tetris] Final score: 167 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 19:50:56,059 INFO [tetris] Final score: 236 +Lost due to: LockOut +2020-04-20 19:50:57,324 INFO [tetris] Final score: 206 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 19:50:58,987 INFO [tetris] Final score: 251 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:50:59,980 INFO [tetris] Final score: 215 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:51:00,763 INFO [tetris] Final score: 168 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:51:01,403 INFO [tetris] Final score: 169 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:51:02,603 INFO [tetris] Final score: 236 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 19:51:03,259 INFO [tetris] Final score: 236 diff --git a/qlearning-results/a0.1-g0.9-e0.9-qlearning b/qlearning-results/a0.1-g0.9-e0.9-qlearning new file mode 100644 index 0000000..cd8c4bd --- /dev/null +++ b/qlearning-results/a0.1-g0.9-e0.9-qlearning @@ -0,0 +1,30 @@ +2020-04-20 21:54:52,451 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 0.9, exploration_rate = 0.9 +223.64375000000032 +223.3694500000013 +223.38300000000066 +222.72075000000126 +222.8378500000019 +222.92165000000165 +222.5837000000008 +222.96700000000087 +223.00550000000015 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:02:36,064 INFO [tetris] Final score: 177 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:02:37,489 INFO [tetris] Final score: 310 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-20 22:02:40,305 INFO [tetris] Final score: 307 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 22:02:41,089 INFO [tetris] Final score: 157 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:02:42,129 INFO [tetris] Final score: 203 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:02:44,000 INFO [tetris] Final score: 308 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:02:46,160 INFO [tetris] Final score: 225 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 22:02:47,168 INFO [tetris] Final score: 193 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:02:49,024 INFO [tetris] Final score: 208 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:02:50,288 INFO [tetris] Final score: 291 diff --git a/qlearning-results/a0.1-g1.0-e0.1-approximateqlearning b/qlearning-results/a0.1-g1.0-e0.1-approximateqlearning new file mode 100644 index 0000000..58487a2 --- /dev/null +++ b/qlearning-results/a0.1-g1.0-e0.1-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 15:49:27,904 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 1, exploration_rate = 0.1 +346.7053500000016 +374.44729999999885 +371.84564999999986 +372.5076500000012 +372.05924999999826 +373.353400000003 +372.66549999999904 +372.52020000000164 +372.8893 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 20:04:21,601 INFO [tetris] Final score: 478 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:04:32,017 INFO [tetris] Final score: 247 +Lost due to: BlockOut(Position { x: 3, y: 20 }) 
+2020-04-20 20:04:52,545 INFO [tetris] Final score: 321 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:05:31,713 INFO [tetris] Final score: 470 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:05:54,848 INFO [tetris] Final score: 295 +Lost due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-20 20:06:23,569 INFO [tetris] Final score: 330 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 20:06:51,568 INFO [tetris] Final score: 353 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:07:48,178 INFO [tetris] Final score: 461 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 20:08:11,680 INFO [tetris] Final score: 373 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-20 20:08:39,793 INFO [tetris] Final score: 338 diff --git a/qlearning-results/a0.1-g1.0-e0.1-qlearning b/qlearning-results/a0.1-g1.0-e0.1-qlearning new file mode 100644 index 0000000..b18bb48 --- /dev/null +++ b/qlearning-results/a0.1-g1.0-e0.1-qlearning @@ -0,0 +1,30 @@ +2020-04-20 15:38:27,993 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 1, exploration_rate = 0.1 +218.93345000000153 +215.35020000000193 +214.6898000000031 +213.1617000000016 +213.27190000000175 +213.64700000000198 +212.96310000000136 +212.57775000000174 +212.41390000000064 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 15:49:01,102 INFO [tetris] Final score: 180 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 15:49:02,094 INFO [tetris] Final score: 206 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 15:49:03,965 INFO [tetris] Final score: 197 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 15:49:04,574 INFO [tetris] Final score: 234 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 15:49:05,421 INFO [tetris] Final score: 194 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 15:49:06,574 INFO [tetris] Final score: 220 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-20 15:49:07,886 INFO [tetris] Final score: 240 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 15:49:09,262 INFO [tetris] Final score: 236 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 15:49:12,030 INFO [tetris] Final score: 218 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 15:49:13,918 INFO [tetris] Final score: 265 diff --git a/qlearning-results/a0.1-g1.0-e0.25-approximateqlearning b/qlearning-results/a0.1-g1.0-e0.25-approximateqlearning new file mode 100644 index 0000000..4244708 --- /dev/null +++ b/qlearning-results/a0.1-g1.0-e0.25-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 19:36:42,460 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 1, exploration_rate = 0.25 +200.03349999999998 +251.34600000000006 +291.4300000000002 +289.1145000000006 +294.6325000000002 +299.0275000000002 +297.8514999999994 +299.59449999999987 +300.00349999999935 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 19:42:08,739 INFO [tetris] Final score: 268 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:42:14,179 INFO [tetris] Final score: 314 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:42:20,643 INFO [tetris] Final score: 373 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 19:42:29,363 INFO [tetris] Final score: 277 +Lost due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-20 19:42:37,651 INFO [tetris] Final score: 282 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 
19:42:44,195 INFO [tetris] Final score: 293 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:42:52,179 INFO [tetris] Final score: 289 +Lost due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-20 19:42:57,507 INFO [tetris] Final score: 325 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:43:06,675 INFO [tetris] Final score: 383 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:43:15,155 INFO [tetris] Final score: 341 diff --git a/qlearning-results/a0.1-g1.0-e0.25-qlearning b/qlearning-results/a0.1-g1.0-e0.25-qlearning new file mode 100644 index 0000000..1f8265c --- /dev/null +++ b/qlearning-results/a0.1-g1.0-e0.25-qlearning @@ -0,0 +1,30 @@ +2020-04-20 20:09:03,827 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 1, exploration_rate = 0.25 +222.20209999999997 +219.74505000000042 +218.25350000000049 +217.56200000000186 +217.54045000000082 +216.33019999999962 +216.22405000000035 +215.58710000000076 +215.40420000000307 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:19:23,140 INFO [tetris] Final score: 244 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:19:23,635 INFO [tetris] Final score: 201 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 20:19:24,835 INFO [tetris] Final score: 226 +Lost due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-20 20:19:26,115 INFO [tetris] Final score: 184 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-20 20:19:27,811 INFO [tetris] Final score: 300 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 20:19:28,291 INFO [tetris] Final score: 147 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:19:28,707 INFO [tetris] Final score: 189 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:19:29,379 INFO [tetris] Final score: 267 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:19:30,947 INFO [tetris] Final score: 211 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 20:19:31,955 INFO [tetris] Final score: 178 diff --git a/qlearning-results/a0.1-g1.0-e0.5-approximateqlearning b/qlearning-results/a0.1-g1.0-e0.5-approximateqlearning new file mode 100644 index 0000000..a9841e7 --- /dev/null +++ b/qlearning-results/a0.1-g1.0-e0.5-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 19:43:15,167 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 1, exploration_rate = 0.5 +199.35850000000056 +246.6900000000001 +252.32150000000004 +255.94250000000014 +256.25550000000027 +259.1924999999998 +257.8860000000002 +257.5220000000002 +256.3155 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:45:27,543 INFO [tetris] Final score: 243 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 19:45:31,111 INFO [tetris] Final score: 218 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:45:34,967 INFO [tetris] Final score: 247 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:45:37,351 INFO [tetris] Final score: 200 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:45:40,007 INFO [tetris] Final score: 257 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:45:45,655 INFO [tetris] Final score: 359 +Lost due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-20 19:45:50,471 INFO [tetris] Final score: 303 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 19:45:52,918 INFO [tetris] Final score: 260 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:45:54,823 INFO 
[tetris] Final score: 223 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 19:45:57,287 INFO [tetris] Final score: 241 diff --git a/qlearning-results/a0.1-g1.0-e0.5-qlearning b/qlearning-results/a0.1-g1.0-e0.5-qlearning new file mode 100644 index 0000000..1111b5b --- /dev/null +++ b/qlearning-results/a0.1-g1.0-e0.5-qlearning @@ -0,0 +1,30 @@ +2020-04-20 20:19:47,925 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 1, exploration_rate = 0.5 +222.31680000000145 +220.51680000000223 +219.2328500000012 +218.89920000000134 +218.72694999999985 +218.28585000000157 +217.96815000000126 +218.5306000000002 +218.48100000000122 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:29:03,621 INFO [tetris] Final score: 166 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:29:04,629 INFO [tetris] Final score: 235 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:29:06,149 INFO [tetris] Final score: 220 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:29:06,949 INFO [tetris] Final score: 183 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:29:08,404 INFO [tetris] Final score: 249 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:29:09,109 INFO [tetris] Final score: 208 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:29:10,996 INFO [tetris] Final score: 258 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:29:12,357 INFO [tetris] Final score: 246 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-20 20:29:13,477 INFO [tetris] Final score: 262 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:29:14,277 INFO [tetris] Final score: 199 diff --git a/qlearning-results/a0.1-g1.0-e0.75-approximateqlearning b/qlearning-results/a0.1-g1.0-e0.75-approximateqlearning new file mode 100644 index 0000000..eabfed8 --- /dev/null +++ b/qlearning-results/a0.1-g1.0-e0.75-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 19:45:57,301 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 1, exploration_rate = 0.75 +213.57999999999984 +232.12850000000046 +236.8270000000001 +238.69949999999994 +236.26400000000027 +237.19350000000054 +237.26200000000057 +234.72600000000028 +237.0454999999998 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:47:15,705 INFO [tetris] Final score: 151 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:47:18,809 INFO [tetris] Final score: 291 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:47:19,481 INFO [tetris] Final score: 156 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 19:47:22,297 INFO [tetris] Final score: 238 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:47:23,321 INFO [tetris] Final score: 182 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:47:25,162 INFO [tetris] Final score: 260 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 19:47:27,097 INFO [tetris] Final score: 253 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 19:47:29,337 INFO [tetris] Final score: 254 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:47:30,857 INFO [tetris] Final score: 273 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 19:47:33,145 INFO [tetris] Final score: 207 diff --git a/qlearning-results/a0.1-g1.0-e0.75-qlearning b/qlearning-results/a0.1-g1.0-e0.75-qlearning new file mode 100644 index 0000000..390f6bf --- /dev/null +++ 
b/qlearning-results/a0.1-g1.0-e0.75-qlearning @@ -0,0 +1,30 @@ +2020-04-20 20:29:30,884 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 1, exploration_rate = 0.75 +223.4209 +222.43200000000067 +221.8176000000021 +221.0938500000012 +221.02510000000146 +220.63270000000313 +220.69475000000156 +220.55210000000224 +220.20850000000007 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:37:50,506 INFO [tetris] Final score: 199 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:37:52,203 INFO [tetris] Final score: 225 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:37:53,211 INFO [tetris] Final score: 284 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 20:37:53,931 INFO [tetris] Final score: 184 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:37:55,372 INFO [tetris] Final score: 225 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:37:56,251 INFO [tetris] Final score: 141 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:37:57,787 INFO [tetris] Final score: 249 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:37:59,659 INFO [tetris] Final score: 300 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:38:00,524 INFO [tetris] Final score: 202 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:38:01,675 INFO [tetris] Final score: 212 diff --git a/qlearning-results/a0.1-g1.0-e0.9-approximateqlearning b/qlearning-results/a0.1-g1.0-e0.9-approximateqlearning new file mode 100644 index 0000000..aa475ef --- /dev/null +++ b/qlearning-results/a0.1-g1.0-e0.9-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 19:47:33,158 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 1, exploration_rate = 0.9 +219.51950000000062 +229.94900000000015 +229.59200000000027 +228.4244999999997 +228.53999999999974 +226.99699999999993 +229.38000000000034 +226.4289999999998 +228.8485000000001 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:48:35,817 INFO [tetris] Final score: 216 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:48:37,449 INFO [tetris] Final score: 266 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:48:38,345 INFO [tetris] Final score: 204 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:48:39,161 INFO [tetris] Final score: 174 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:48:40,681 INFO [tetris] Final score: 202 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:48:42,713 INFO [tetris] Final score: 193 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-20 19:48:44,745 INFO [tetris] Final score: 229 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:48:47,337 INFO [tetris] Final score: 249 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:48:48,745 INFO [tetris] Final score: 254 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:48:51,544 INFO [tetris] Final score: 251 diff --git a/qlearning-results/a0.1-g1.0-e0.9-qlearning b/qlearning-results/a0.1-g1.0-e0.9-qlearning new file mode 100644 index 0000000..6bbff11 --- /dev/null +++ b/qlearning-results/a0.1-g1.0-e0.9-qlearning @@ -0,0 +1,30 @@ +2020-04-20 20:46:50,904 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 1, exploration_rate = 0.9 +224.01290000000077 +223.2799000000014 +222.90805000000123 +222.36550000000076 +222.8054500000009 +222.31225000000111 
+222.2265000000006 +222.38505000000137 +222.1896500000007 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:54:37,200 INFO [tetris] Final score: 192 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:54:38,317 INFO [tetris] Final score: 238 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 20:54:39,998 INFO [tetris] Final score: 233 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:54:41,789 INFO [tetris] Final score: 209 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:54:43,133 INFO [tetris] Final score: 204 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:54:44,350 INFO [tetris] Final score: 221 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 20:54:46,574 INFO [tetris] Final score: 263 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:54:47,421 INFO [tetris] Final score: 180 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:54:48,766 INFO [tetris] Final score: 253 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:54:49,437 INFO [tetris] Final score: 198 diff --git a/qlearning-results/a0.5-g0.1-e0.1-approximateqlearning b/qlearning-results/a0.5-g0.1-e0.1-approximateqlearning new file mode 100644 index 0000000..ae91c93 --- /dev/null +++ b/qlearning-results/a0.5-g0.1-e0.1-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 20:42:24,169 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 0.1, exploration_rate = 0.1 +203.37650000000005 +203.32950000000062 +205.2715000000001 +204.56650000000042 +205.21499999999992 +204.48100000000048 +203.35300000000078 +205.1395000000002 +204.1155 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:42:52,667 INFO [tetris] Final score: 194 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:42:52,891 INFO [tetris] Final score: 198 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:42:53,099 INFO [tetris] Final score: 198 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:42:53,307 INFO [tetris] Final score: 198 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:42:53,483 INFO [tetris] Final score: 196 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:42:53,691 INFO [tetris] Final score: 230 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 20:42:53,883 INFO [tetris] Final score: 234 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:42:54,091 INFO [tetris] Final score: 200 +Lost due to: LockOut +2020-04-20 20:42:54,284 INFO [tetris] Final score: 208 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:42:54,475 INFO [tetris] Final score: 184 diff --git a/qlearning-results/a0.5-g0.1-e0.1-qlearning b/qlearning-results/a0.5-g0.1-e0.1-qlearning new file mode 100644 index 0000000..581b690 --- /dev/null +++ b/qlearning-results/a0.5-g0.1-e0.1-qlearning @@ -0,0 +1,30 @@ +2020-04-21 01:49:07,486 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 0.1, exploration_rate = 0.1 +220.52315000000206 +218.00770000000026 +216.9214000000011 +216.5741500000017 +216.04140000000086 +215.82124999999905 +215.89405000000198 +215.5497000000022 +215.38415000000222 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:56:47,105 INFO [tetris] Final score: 196 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-21 01:56:47,473 INFO [tetris] Final score: 179 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:56:48,817 INFO [tetris] Final 
score: 213 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 01:56:49,201 INFO [tetris] Final score: 194 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:56:50,145 INFO [tetris] Final score: 253 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:56:51,473 INFO [tetris] Final score: 242 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:56:52,033 INFO [tetris] Final score: 211 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:56:52,609 INFO [tetris] Final score: 196 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-21 01:56:53,489 INFO [tetris] Final score: 200 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:56:54,401 INFO [tetris] Final score: 216 diff --git a/qlearning-results/a0.5-g0.1-e0.25-approximateqlearning b/qlearning-results/a0.5-g0.1-e0.25-approximateqlearning new file mode 100644 index 0000000..99da7a6 --- /dev/null +++ b/qlearning-results/a0.5-g0.1-e0.25-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 20:42:54,489 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 0.1, exploration_rate = 0.25 +196.89799999999997 +197.32500000000024 +196.77949999999998 +196.7270000000004 +198.16650000000024 +197.20350000000045 +198.81850000000034 +196.93449999999964 +195.7664999999999 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:43:20,168 INFO [tetris] Final score: 155 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-20 20:43:23,208 INFO [tetris] Final score: 237 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:43:24,664 INFO [tetris] Final score: 186 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:43:27,224 INFO [tetris] Final score: 238 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 20:43:28,968 INFO [tetris] Final score: 253 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 20:43:31,128 INFO [tetris] Final score: 270 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:43:32,840 INFO [tetris] Final score: 275 +Lost due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-20 20:43:33,481 INFO [tetris] Final score: 137 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:43:34,456 INFO [tetris] Final score: 194 +Lost due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-20 20:43:34,968 INFO [tetris] Final score: 140 diff --git a/qlearning-results/a0.5-g0.1-e0.25-qlearning b/qlearning-results/a0.5-g0.1-e0.25-qlearning new file mode 100644 index 0000000..e357fd7 --- /dev/null +++ b/qlearning-results/a0.5-g0.1-e0.25-qlearning @@ -0,0 +1,30 @@ +2020-04-21 01:57:05,932 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 0.1, exploration_rate = 0.25 +221.65875000000136 +219.62845000000095 +218.3312500000014 +217.77255000000184 +216.97090000000227 +217.32470000000177 +217.11195000000083 +216.90805000000185 +216.57765000000197 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:05:18,018 INFO [tetris] Final score: 259 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:05:18,578 INFO [tetris] Final score: 204 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-21 02:05:19,009 INFO [tetris] Final score: 184 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:05:19,826 INFO [tetris] Final score: 241 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 02:05:21,202 INFO [tetris] Final score: 213 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-21 02:05:21,873 INFO [tetris] Final 
score: 178 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:05:23,490 INFO [tetris] Final score: 237 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:05:24,449 INFO [tetris] Final score: 230 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 02:05:24,930 INFO [tetris] Final score: 193 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:05:25,665 INFO [tetris] Final score: 185 diff --git a/qlearning-results/a0.5-g0.1-e0.5-approximateqlearning b/qlearning-results/a0.5-g0.1-e0.5-approximateqlearning new file mode 100644 index 0000000..8dcb9b0 --- /dev/null +++ b/qlearning-results/a0.5-g0.1-e0.5-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 20:43:34,981 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 0.1, exploration_rate = 0.5 +194.5400000000003 +193.8670000000001 +192.81949999999986 +193.60599999999968 +194.58000000000044 +193.0275 +193.3220000000003 +192.87450000000018 +193.6115 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:44:02,579 INFO [tetris] Final score: 254 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 20:44:02,819 INFO [tetris] Final score: 194 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:44:03,091 INFO [tetris] Final score: 143 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:44:03,795 INFO [tetris] Final score: 257 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:44:04,659 INFO [tetris] Final score: 143 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:44:04,931 INFO [tetris] Final score: 201 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:44:05,475 INFO [tetris] Final score: 220 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:44:06,131 INFO [tetris] Final score: 171 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:44:06,579 INFO [tetris] Final score: 178 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:44:06,915 INFO [tetris] Final score: 188 diff --git a/qlearning-results/a0.5-g0.1-e0.5-qlearning b/qlearning-results/a0.5-g0.1-e0.5-qlearning new file mode 100644 index 0000000..778f1c9 --- /dev/null +++ b/qlearning-results/a0.5-g0.1-e0.5-qlearning @@ -0,0 +1,30 @@ +2020-04-21 02:05:39,081 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 0.1, exploration_rate = 0.5 +222.63780000000236 +220.23420000000027 +219.6870500000017 +219.27970000000136 +218.87424999999996 +217.92980000000009 +218.1931000000028 +218.29909999999973 +218.03945000000206 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:13:36,335 INFO [tetris] Final score: 206 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:13:37,935 INFO [tetris] Final score: 194 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:13:39,727 INFO [tetris] Final score: 238 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 02:13:41,039 INFO [tetris] Final score: 211 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:13:42,240 INFO [tetris] Final score: 234 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:13:43,471 INFO [tetris] Final score: 239 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:13:44,415 INFO [tetris] Final score: 219 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-21 02:13:45,008 INFO [tetris] Final score: 190 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:13:46,160 INFO [tetris] Final score: 217 +Lost due to: 
BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:13:46,607 INFO [tetris] Final score: 164 diff --git a/qlearning-results/a0.5-g0.1-e0.75-approximateqlearning b/qlearning-results/a0.5-g0.1-e0.75-approximateqlearning new file mode 100644 index 0000000..a8731a7 --- /dev/null +++ b/qlearning-results/a0.5-g0.1-e0.75-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 20:44:06,927 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 0.1, exploration_rate = 0.75 +197.86499999999978 +198.33849999999993 +199.11550000000037 +199.1649999999996 +198.89099999999974 +198.57650000000004 +200.0650000000003 +200.02300000000014 +198.99549999999994 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 20:44:40,315 INFO [tetris] Final score: 230 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:44:41,228 INFO [tetris] Final score: 199 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:44:42,140 INFO [tetris] Final score: 211 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:44:42,636 INFO [tetris] Final score: 212 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:44:43,788 INFO [tetris] Final score: 187 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:44:44,988 INFO [tetris] Final score: 206 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:44:45,707 INFO [tetris] Final score: 153 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:44:46,283 INFO [tetris] Final score: 197 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:44:46,971 INFO [tetris] Final score: 188 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:44:48,172 INFO [tetris] Final score: 184 diff --git a/qlearning-results/a0.5-g0.1-e0.75-qlearning b/qlearning-results/a0.5-g0.1-e0.75-qlearning new file mode 100644 index 0000000..e61d860 --- /dev/null +++ b/qlearning-results/a0.5-g0.1-e0.75-qlearning @@ -0,0 +1,30 @@ +2020-04-21 02:14:01,778 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 0.1, exploration_rate = 0.75 +223.03660000000102 +221.14205000000183 +220.64650000000265 +220.44305000000108 +220.33760000000007 +220.54309999999998 +219.56440000000123 +219.90739999999963 +219.63010000000193 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:21:23,926 INFO [tetris] Final score: 254 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:21:25,942 INFO [tetris] Final score: 251 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 02:21:26,726 INFO [tetris] Final score: 141 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:21:29,062 INFO [tetris] Final score: 258 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:21:30,822 INFO [tetris] Final score: 220 +Lost due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-21 02:21:32,134 INFO [tetris] Final score: 234 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:21:33,254 INFO [tetris] Final score: 175 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:21:34,407 INFO [tetris] Final score: 200 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:21:36,022 INFO [tetris] Final score: 239 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:21:36,950 INFO [tetris] Final score: 206 diff --git a/qlearning-results/a0.5-g0.1-e0.9-approximateqlearning b/qlearning-results/a0.5-g0.1-e0.9-approximateqlearning new file mode 100644 index 0000000..b05cc2d --- /dev/null +++ 
b/qlearning-results/a0.5-g0.1-e0.9-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 20:44:48,188 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 0.1, exploration_rate = 0.9 +210.13300000000012 +209.122 +208.12050000000042 +208.88250000000016 +208.1355 +209.6869999999997 +208.66399999999985 +208.08800000000028 +208.9255000000001 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:45:29,675 INFO [tetris] Final score: 201 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:45:30,778 INFO [tetris] Final score: 212 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:45:31,803 INFO [tetris] Final score: 187 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 20:45:32,363 INFO [tetris] Final score: 189 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:45:33,227 INFO [tetris] Final score: 174 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 20:45:34,251 INFO [tetris] Final score: 166 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:45:35,162 INFO [tetris] Final score: 198 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:45:35,835 INFO [tetris] Final score: 194 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 20:45:36,682 INFO [tetris] Final score: 171 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 20:45:37,498 INFO [tetris] Final score: 226 diff --git a/qlearning-results/a0.5-g0.1-e0.9-qlearning b/qlearning-results/a0.5-g0.1-e0.9-qlearning new file mode 100644 index 0000000..e7649a2 --- /dev/null +++ b/qlearning-results/a0.5-g0.1-e0.9-qlearning @@ -0,0 +1,30 @@ +2020-04-21 02:21:53,490 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 0.1, exploration_rate = 0.9 +223.46845000000124 +223.1418000000001 +222.22019999999972 +222.05500000000015 +222.0302500000011 +222.12285000000077 +221.99990000000147 +221.55780000000158 +221.98394999999928 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 02:28:52,171 INFO [tetris] Final score: 176 +Lost due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-21 02:28:54,875 INFO [tetris] Final score: 319 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:28:56,251 INFO [tetris] Final score: 199 +Lost due to: LockOut +2020-04-21 02:28:57,515 INFO [tetris] Final score: 240 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:28:58,859 INFO [tetris] Final score: 178 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:29:00,155 INFO [tetris] Final score: 205 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 02:29:02,043 INFO [tetris] Final score: 339 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:29:03,131 INFO [tetris] Final score: 221 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:29:04,251 INFO [tetris] Final score: 198 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:29:06,363 INFO [tetris] Final score: 239 diff --git a/qlearning-results/a0.5-g0.5-e0.1-approximateqlearning b/qlearning-results/a0.5-g0.5-e0.1-approximateqlearning new file mode 100644 index 0000000..93fed9d --- /dev/null +++ b/qlearning-results/a0.5-g0.5-e0.1-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 20:39:29,556 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 0.5, exploration_rate = 0.1 +205.1515000000002 +203.5880000000005 +204.79000000000022 +205.6105000000004 +204.66900000000066 +204.7145 +203.85100000000045 +204.1160000000002 
+205.15300000000053 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 20:39:54,816 INFO [tetris] Final score: 200 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:39:55,008 INFO [tetris] Final score: 216 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:39:55,217 INFO [tetris] Final score: 195 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:39:55,424 INFO [tetris] Final score: 200 +Lost due to: LockOut +2020-04-20 20:39:55,632 INFO [tetris] Final score: 200 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:39:55,825 INFO [tetris] Final score: 190 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 20:39:56,048 INFO [tetris] Final score: 198 +Lost due to: LockOut +2020-04-20 20:39:56,304 INFO [tetris] Final score: 212 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:39:56,528 INFO [tetris] Final score: 215 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:39:56,736 INFO [tetris] Final score: 210 diff --git a/qlearning-results/a0.5-g0.5-e0.1-qlearning b/qlearning-results/a0.5-g0.5-e0.1-qlearning new file mode 100644 index 0000000..87c9462 --- /dev/null +++ b/qlearning-results/a0.5-g0.5-e0.1-qlearning @@ -0,0 +1,30 @@ +2020-04-21 01:08:55,755 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 0.5, exploration_rate = 0.1 +219.5724500000018 +217.2081000000039 +216.35635000000116 +215.82600000000267 +215.68915000000212 +215.25045000000264 +215.66660000000238 +215.1686000000017 +215.74980000000153 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:16:34,121 INFO [tetris] Final score: 223 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-21 01:16:34,841 INFO [tetris] Final score: 199 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-21 01:16:35,784 INFO [tetris] Final score: 192 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:16:37,081 INFO [tetris] Final score: 273 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 01:16:37,480 INFO [tetris] Final score: 193 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 01:16:38,264 INFO [tetris] Final score: 222 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 01:16:39,305 INFO [tetris] Final score: 246 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:16:39,993 INFO [tetris] Final score: 195 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:16:40,873 INFO [tetris] Final score: 243 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:16:41,721 INFO [tetris] Final score: 196 diff --git a/qlearning-results/a0.5-g0.5-e0.25-approximateqlearning b/qlearning-results/a0.5-g0.5-e0.25-approximateqlearning new file mode 100644 index 0000000..1fdcea6 --- /dev/null +++ b/qlearning-results/a0.5-g0.5-e0.25-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 20:39:56,747 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 0.5, exploration_rate = 0.25 +198.71550000000005 +197.91450000000012 +198.36000000000013 +198.0735 +197.2970000000005 +197.83399999999992 +198.572 +197.56950000000018 +198.58100000000022 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 20:40:21,258 INFO [tetris] Final score: 210 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:40:22,697 INFO [tetris] Final score: 231 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:40:23,017 INFO [tetris] Final score: 164 +Lost due to: BlockOut(Position { x: 3, y: 20 }) 
+2020-04-20 20:40:23,337 INFO [tetris] Final score: 181 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:40:24,169 INFO [tetris] Final score: 262 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:40:25,785 INFO [tetris] Final score: 304 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:40:26,986 INFO [tetris] Final score: 176 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:40:27,241 INFO [tetris] Final score: 176 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-20 20:40:28,425 INFO [tetris] Final score: 194 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 20:40:28,745 INFO [tetris] Final score: 164 diff --git a/qlearning-results/a0.5-g0.5-e0.25-qlearning b/qlearning-results/a0.5-g0.5-e0.25-qlearning new file mode 100644 index 0000000..0219635 --- /dev/null +++ b/qlearning-results/a0.5-g0.5-e0.25-qlearning @@ -0,0 +1,30 @@ +2020-04-21 01:16:53,056 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 0.5, exploration_rate = 0.25 +222.49660000000034 +219.73660000000217 +218.96715000000236 +218.05695000000065 +217.58195000000208 +217.3148500000012 +217.7000500000011 +217.22635000000182 +217.14505000000122 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-21 01:25:04,308 INFO [tetris] Final score: 199 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:25:04,948 INFO [tetris] Final score: 199 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:25:05,460 INFO [tetris] Final score: 192 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:25:06,084 INFO [tetris] Final score: 225 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 01:25:06,436 INFO [tetris] Final score: 208 +Lost due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-21 01:25:07,476 INFO [tetris] Final score: 234 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:25:08,212 INFO [tetris] Final score: 205 +Lost due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-21 01:25:09,748 INFO [tetris] Final score: 252 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 01:25:11,428 INFO [tetris] Final score: 198 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:25:12,372 INFO [tetris] Final score: 216 diff --git a/qlearning-results/a0.5-g0.5-e0.5-approximateqlearning b/qlearning-results/a0.5-g0.5-e0.5-approximateqlearning new file mode 100644 index 0000000..0cf8f6c --- /dev/null +++ b/qlearning-results/a0.5-g0.5-e0.5-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 20:40:28,756 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 0.5, exploration_rate = 0.5 +193.7969999999999 +192.72800000000007 +193.6359999999998 +193.49500000000052 +194.94100000000026 +193.34750000000048 +194.48699999999997 +194.4004999999999 +193.89799999999997 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 20:40:54,867 INFO [tetris] Final score: 169 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:40:55,315 INFO [tetris] Final score: 216 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:40:55,603 INFO [tetris] Final score: 181 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:40:55,956 INFO [tetris] Final score: 170 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:40:56,275 INFO [tetris] Final score: 139 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:40:56,595 INFO [tetris] Final score: 173 +Lost due to: BlockOut(Position { x: 4, y: 20 }) 
+2020-04-20 20:40:56,947 INFO [tetris] Final score: 225 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:40:57,251 INFO [tetris] Final score: 204 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 20:40:57,603 INFO [tetris] Final score: 142 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:40:58,131 INFO [tetris] Final score: 218 diff --git a/qlearning-results/a0.5-g0.5-e0.5-qlearning b/qlearning-results/a0.5-g0.5-e0.5-qlearning new file mode 100644 index 0000000..6cffd36 --- /dev/null +++ b/qlearning-results/a0.5-g0.5-e0.5-qlearning @@ -0,0 +1,30 @@ +2020-04-21 01:25:25,702 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 0.5, exploration_rate = 0.5 +222.92050000000148 +220.33499999999992 +219.15265000000207 +219.5142500000014 +218.45280000000096 +218.5314500000014 +218.28495000000092 +217.89600000000118 +217.987750000001 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:33:21,155 INFO [tetris] Final score: 192 +Lost due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-21 01:33:22,356 INFO [tetris] Final score: 226 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:33:24,019 INFO [tetris] Final score: 217 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:33:24,964 INFO [tetris] Final score: 200 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:33:26,228 INFO [tetris] Final score: 242 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:33:27,459 INFO [tetris] Final score: 218 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:33:29,043 INFO [tetris] Final score: 214 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:33:30,244 INFO [tetris] Final score: 252 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-21 01:33:31,028 INFO [tetris] Final score: 241 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:33:32,580 INFO [tetris] Final score: 248 diff --git a/qlearning-results/a0.5-g0.5-e0.75-approximateqlearning b/qlearning-results/a0.5-g0.5-e0.75-approximateqlearning new file mode 100644 index 0000000..4cf6ca8 --- /dev/null +++ b/qlearning-results/a0.5-g0.5-e0.75-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 20:40:58,145 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 0.5, exploration_rate = 0.75 +197.96099999999993 +198.97100000000023 +200.02950000000013 +199.54850000000042 +199.69600000000028 +197.88949999999997 +198.17749999999975 +198.86950000000041 +199.13550000000035 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:41:29,779 INFO [tetris] Final score: 159 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 20:41:30,291 INFO [tetris] Final score: 163 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:41:31,235 INFO [tetris] Final score: 189 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:41:31,827 INFO [tetris] Final score: 219 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-20 20:41:32,227 INFO [tetris] Final score: 166 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:41:33,251 INFO [tetris] Final score: 167 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 20:41:33,955 INFO [tetris] Final score: 193 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:41:34,435 INFO [tetris] Final score: 184 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:41:35,187 INFO [tetris] Final score: 219 +Lost due to: BlockOut(Position { x: 5, y: 20 }) 
+2020-04-20 20:41:35,907 INFO [tetris] Final score: 231
diff --git a/qlearning-results/a0.5-g0.5-e0.75-qlearning b/qlearning-results/a0.5-g0.5-e0.75-qlearning
new file mode 100644
index 0000000..ff8cd48
--- /dev/null
+++ b/qlearning-results/a0.5-g0.5-e0.75-qlearning
@@ -0,0 +1,30 @@
+2020-04-21 01:33:47,681 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 0.5, exploration_rate = 0.75
+223.2484499999999
+221.97360000000006
+221.1147500000011
+220.56820000000087
+219.88284999999982
+220.1310000000007
+219.69925000000114
+219.76260000000016
+219.58485000000098
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 01:41:09,567 INFO [tetris] Final score: 189
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-21 01:41:11,343 INFO [tetris] Final score: 269
+Lost due to: LockOut
+2020-04-21 01:41:13,087 INFO [tetris] Final score: 218
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 01:41:14,479 INFO [tetris] Final score: 286
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 01:41:15,999 INFO [tetris] Final score: 231
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 01:41:17,359 INFO [tetris] Final score: 195
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 01:41:18,143 INFO [tetris] Final score: 209
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 01:41:19,295 INFO [tetris] Final score: 244
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 01:41:20,879 INFO [tetris] Final score: 195
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 01:41:21,983 INFO [tetris] Final score: 247
diff --git a/qlearning-results/a0.5-g0.5-e0.9-approximateqlearning b/qlearning-results/a0.5-g0.5-e0.9-approximateqlearning
new file mode 100644
index 0000000..e759f59
--- /dev/null
+++ b/qlearning-results/a0.5-g0.5-e0.9-approximateqlearning
@@ -0,0 +1,30 @@
+2020-04-20 20:41:35,921 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 0.5, exploration_rate = 0.9
+208.1320000000003
+208.23949999999988
+208.45799999999983
+208.3780000000001
+208.99550000000002
+209.09000000000043
+208.07650000000012
+208.97400000000025
+207.9755000000001
+Lost due to: BlockOut(Position { x: 4, y: 19 })
+2020-04-20 20:42:16,458 INFO [tetris] Final score: 169
+Lost due to: BlockOut(Position { x: 3, y: 20 })
+2020-04-20 20:42:17,098 INFO [tetris] Final score: 183
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 20:42:17,834 INFO [tetris] Final score: 167
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-20 20:42:18,506 INFO [tetris] Final score: 193
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-20 20:42:19,546 INFO [tetris] Final score: 167
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 20:42:20,378 INFO [tetris] Final score: 239
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 20:42:21,386 INFO [tetris] Final score: 232
+Lost due to: BlockOut(Position { x: 5, y: 19 })
+2020-04-20 20:42:22,090 INFO [tetris] Final score: 237
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 20:42:23,466 INFO [tetris] Final score: 246
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 20:42:24,154 INFO [tetris] Final score: 158
diff --git a/qlearning-results/a0.5-g0.5-e0.9-qlearning b/qlearning-results/a0.5-g0.5-e0.9-qlearning
new file mode 100644
index 0000000..f61efee
--- /dev/null
+++ b/qlearning-results/a0.5-g0.5-e0.9-qlearning
@@ -0,0 +1,30 @@
+2020-04-21 01:41:38,391 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 0.5, exploration_rate = 0.9
+223.55335000000053
+222.89440000000275
+222.76655000000048
+221.91440000000173
+221.9375500000019
+221.7492500000019
+221.86305000000246
+221.64800000000145
+221.42140000000026
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 01:48:38,825 INFO [tetris] Final score: 211
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 01:48:40,121 INFO [tetris] Final score: 269
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 01:48:40,857 INFO [tetris] Final score: 202
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 01:48:42,329 INFO [tetris] Final score: 219
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 01:48:43,945 INFO [tetris] Final score: 261
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-21 01:48:45,208 INFO [tetris] Final score: 193
+Lost due to: BlockOut(Position { x: 3, y: 20 })
+2020-04-21 01:48:46,121 INFO [tetris] Final score: 206
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 01:48:48,040 INFO [tetris] Final score: 292
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 01:48:49,400 INFO [tetris] Final score: 231
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 01:48:50,169 INFO [tetris] Final score: 204
diff --git a/qlearning-results/a0.5-g0.9-e0.1-approximateqlearning b/qlearning-results/a0.5-g0.9-e0.1-approximateqlearning
new file mode 100644
index 0000000..783f323
--- /dev/null
+++ b/qlearning-results/a0.5-g0.9-e0.1-approximateqlearning
@@ -0,0 +1,30 @@
+2020-04-20 20:36:28,129 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 0.9, exploration_rate = 0.1
+202.97050000000047
+202.64200000000017
+203.3925000000003
+202.7515000000006
+203.1550000000002
+203.68400000000057
+202.99150000000046
+229.16699999999997
+202.97650000000021
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 20:36:52,081 INFO [tetris] Final score: 194
+Lost due to: BlockOut(Position { x: 4, y: 19 })
+2020-04-20 20:36:52,306 INFO [tetris] Final score: 208
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 20:36:52,482 INFO [tetris] Final score: 198
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 20:36:52,658 INFO [tetris] Final score: 194
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 20:36:52,850 INFO [tetris] Final score: 196
+Lost due to: BlockOut(Position { x: 4, y: 19 })
+2020-04-20 20:36:53,170 INFO [tetris] Final score: 200
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-20 20:36:53,362 INFO [tetris] Final score: 218
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-20 20:36:53,602 INFO [tetris] Final score: 185
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 20:36:53,811 INFO [tetris] Final score: 190
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 20:36:54,017 INFO [tetris] Final score: 201
diff --git a/qlearning-results/a0.5-g0.9-e0.1-qlearning b/qlearning-results/a0.5-g0.9-e0.1-qlearning
new file mode 100644
index 0000000..57fd656
--- /dev/null
+++ b/qlearning-results/a0.5-g0.9-e0.1-qlearning
@@ -0,0 +1,30 @@
+2020-04-21 00:28:36,020 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 0.9, exploration_rate = 0.1
+214.4217500000025
+213.4693500000033
+212.35110000000185
+211.8555500000019
+211.06070000000088
+211.5214500000015
+210.9196000000009
+210.7748500000007
+210.66610000000165
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 00:36:21,382 INFO [tetris] Final score: 189
+Lost due to: BlockOut(Position { x: 4, y: 19 })
+2020-04-21 00:36:21,990 INFO [tetris] Final score: 185
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 00:36:23,286 INFO [tetris] Final score: 280
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 00:36:23,942 INFO [tetris] Final score: 175
+Lost due to: BlockOut(Position { x: 3, y: 20 })
+2020-04-21 00:36:24,581 INFO [tetris] Final score: 198
+Lost due to: BlockOut(Position { x: 3, y: 20 })
+2020-04-21 00:36:25,318 INFO [tetris] Final score: 182
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 00:36:26,149 INFO [tetris] Final score: 203
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 00:36:27,094 INFO [tetris] Final score: 225
+Lost due to: BlockOut(Position { x: 4, y: 19 })
+2020-04-21 00:36:28,038 INFO [tetris] Final score: 188
+Lost due to: BlockOut(Position { x: 5, y: 19 })
+2020-04-21 00:36:28,741 INFO [tetris] Final score: 209
diff --git a/qlearning-results/a0.5-g0.9-e0.25-approximateqlearning b/qlearning-results/a0.5-g0.9-e0.25-approximateqlearning
new file mode 100644
index 0000000..441c7ac
--- /dev/null
+++ b/qlearning-results/a0.5-g0.9-e0.25-approximateqlearning
@@ -0,0 +1,30 @@
+2020-04-20 20:36:54,029 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 0.9, exploration_rate = 0.25
+200.2145000000004
+200.25650000000027
+201.15650000000036
+200.76500000000038
+200.30800000000002
+203.1889999999998
+205.19950000000046
+204.66200000000038
+202.46200000000036
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 20:37:22,123 INFO [tetris] Final score: 204
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 20:37:22,331 INFO [tetris] Final score: 206
+Lost due to: LockOut
+2020-04-20 20:37:22,555 INFO [tetris] Final score: 188
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 20:37:22,747 INFO [tetris] Final score: 170
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 20:37:22,987 INFO [tetris] Final score: 202
+Lost due to: LockOut
+2020-04-20 20:37:23,227 INFO [tetris] Final score: 191
+Lost due to: BlockOut(Position { x: 4, y: 19 })
+2020-04-20 20:37:23,451 INFO [tetris] Final score: 194
+Lost due to: BlockOut(Position { x: 3, y: 20 })
+2020-04-20 20:37:23,723 INFO [tetris] Final score: 220
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-20 20:37:23,931 INFO [tetris] Final score: 204
+Lost due to: BlockOut(Position { x: 4, y: 19 })
+2020-04-20 20:37:24,155 INFO [tetris] Final score: 200
diff --git a/qlearning-results/a0.5-g0.9-e0.25-qlearning b/qlearning-results/a0.5-g0.9-e0.25-qlearning
new file mode 100644
index 0000000..8801cb5
--- /dev/null
+++ b/qlearning-results/a0.5-g0.9-e0.25-qlearning
@@ -0,0 +1,30 @@
+2020-04-21 00:36:40,028 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 0.9, exploration_rate = 0.25
+219.52965000000134
+217.46480000000184
+217.05085000000153
+216.81235000000166
+216.0612000000017
+216.112400000001
+216.3895000000015
+216.86630000000244
+217.4367500000018
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 00:44:47,540 INFO [tetris] Final score: 210
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 00:44:48,276 INFO [tetris] Final score: 209
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 00:44:49,156 INFO [tetris] Final score: 168
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-21 00:44:49,861 INFO [tetris] Final score: 184
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 00:44:50,596 INFO [tetris] Final score: 196
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-21 00:44:51,476 INFO [tetris] Final score: 186
+Lost due to: BlockOut(Position { x: 3, y: 19 })
+2020-04-21 00:44:51,956 INFO [tetris] Final score: 203
+Lost due to: BlockOut(Position { x: 5, y: 19 })
+2020-04-21 00:44:53,220 INFO [tetris] Final score: 194
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 00:44:54,437 INFO [tetris] Final score: 231
+Lost due to: BlockOut(Position { x: 3, y: 19 })
+2020-04-21 00:44:55,668 INFO [tetris] Final score: 195
diff --git a/qlearning-results/a0.5-g0.9-e0.5-approximateqlearning b/qlearning-results/a0.5-g0.9-e0.5-approximateqlearning
new file mode 100644
index 0000000..dc4f6de
--- /dev/null
+++ b/qlearning-results/a0.5-g0.9-e0.5-approximateqlearning
@@ -0,0 +1,30 @@
+2020-04-20 20:37:24,167 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 0.9, exploration_rate = 0.5
+198.03900000000033
+198.39150000000035
+196.89949999999996
+193.20600000000013
+196.12100000000032
+195.79950000000042
+196.5690000000002
+193.33049999999972
+196.24600000000004
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 20:37:57,626 INFO [tetris] Final score: 210
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 20:37:58,186 INFO [tetris] Final score: 177
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 20:37:59,370 INFO [tetris] Final score: 197
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 20:38:00,458 INFO [tetris] Final score: 188
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 20:38:00,794 INFO [tetris] Final score: 122
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 20:38:01,322 INFO [tetris] Final score: 188
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 20:38:01,754 INFO [tetris] Final score: 198
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 20:38:02,330 INFO [tetris] Final score: 204
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 20:38:03,018 INFO [tetris] Final score: 183
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 20:38:04,250 INFO [tetris] Final score: 240
diff --git a/qlearning-results/a0.5-g0.9-e0.5-qlearning b/qlearning-results/a0.5-g0.9-e0.5-qlearning
new file mode 100644
index 0000000..bb7aec8
--- /dev/null
+++ b/qlearning-results/a0.5-g0.9-e0.5-qlearning
@@ -0,0 +1,30 @@
+2020-04-21 00:45:08,355 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 0.9, exploration_rate = 0.5
+223.13450000000327
+221.98790000000136
+220.86610000000152
+221.07425000000228
+220.40510000000216
+219.85380000000075
+219.9902500000028
+219.56560000000047
+219.67725000000144
+Lost due to: BlockOut(Position { x: 4, y: 19 })
+2020-04-21 00:52:59,125 INFO [tetris] Final score: 283
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 00:53:00,277 INFO [tetris] Final score: 222
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 00:53:02,116 INFO [tetris] Final score: 239
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 00:53:03,141 INFO [tetris] Final score: 227
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 00:53:04,069 INFO [tetris] Final score: 165
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 00:53:04,405 INFO [tetris] Final score: 186
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-21 00:53:05,381 INFO [tetris] Final score: 223
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-21 00:53:07,125 INFO [tetris] Final score: 206
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 00:53:08,661 INFO [tetris] Final score: 215
+Lost due to: BlockOut(Position { x: 3, y: 20 })
+2020-04-21 00:53:10,341 INFO [tetris] Final score: 254
diff --git a/qlearning-results/a0.5-g0.9-e0.75-approximateqlearning b/qlearning-results/a0.5-g0.9-e0.75-approximateqlearning
new file mode 100644
index 0000000..758d8f3
--- /dev/null
+++ b/qlearning-results/a0.5-g0.9-e0.75-approximateqlearning
@@ -0,0 +1,30 @@
+2020-04-20 20:38:04,265 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 0.9, exploration_rate = 0.75
+200.59000000000043
+199.5249999999997
+199.93000000000018
+198.82799999999986
+198.56499999999997
+198.56899999999956
+198.13950000000006
+199.04650000000038
+199.0665000000002
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 20:38:35,806 INFO [tetris] Final score: 190
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 20:38:36,110 INFO [tetris] Final score: 128
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-20 20:38:36,878 INFO [tetris] Final score: 199
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-20 20:38:37,518 INFO [tetris] Final score: 192
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 20:38:38,206 INFO [tetris] Final score: 206
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 20:38:38,910 INFO [tetris] Final score: 117
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-20 20:38:40,414 INFO [tetris] Final score: 201
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 20:38:42,110 INFO [tetris] Final score: 218
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 20:38:42,638 INFO [tetris] Final score: 160
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 20:38:43,759 INFO [tetris] Final score: 134
diff --git a/qlearning-results/a0.5-g0.9-e0.75-qlearning b/qlearning-results/a0.5-g0.9-e0.75-qlearning
new file mode 100644
index 0000000..61cc5eb
--- /dev/null
+++ b/qlearning-results/a0.5-g0.9-e0.75-qlearning
@@ -0,0 +1,30 @@
+2020-04-21 00:53:24,589 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 0.9, exploration_rate = 0.75
+224.54995000000062
+222.71715000000097
+222.9293000000019
+222.2246000000026
+222.39265000000157
+222.02625000000216
+222.27435
+221.88075000000086
+221.68995000000257
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 01:00:53,262 INFO [tetris] Final score: 225
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-21 01:00:54,126 INFO [tetris] Final score: 205
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 01:00:54,894 INFO [tetris] Final score: 210
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 01:00:55,854 INFO [tetris] Final score: 168
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 01:00:57,438 INFO [tetris] Final score: 239
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-21 01:00:59,086 INFO [tetris] Final score: 181
+Lost due to: BlockOut(Position { x: 3, y: 20 })
+2020-04-21 01:01:00,094 INFO [tetris] Final score: 204
+Lost due to: BlockOut(Position { x: 3, y: 19 })
+2020-04-21 01:01:01,614 INFO [tetris] Final score: 207
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 01:01:02,190 INFO [tetris] Final score: 195
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 01:01:03,518 INFO [tetris] Final score: 182
diff --git a/qlearning-results/a0.5-g0.9-e0.9-approximateqlearning b/qlearning-results/a0.5-g0.9-e0.9-approximateqlearning
new file mode 100644
index 0000000..44aef7f
--- /dev/null
+++ b/qlearning-results/a0.5-g0.9-e0.9-approximateqlearning
@@ -0,0 +1,30 @@
+2020-04-20 20:38:43,772 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 0.9, exploration_rate = 0.9
+210.3485000000002
+207.0494999999995
+208.65500000000037
+208.2455
+208.08599999999996
+207.6900000000003
+208.2600000000004
+207.8269999999999
+208.09000000000012
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 20:39:21,191 INFO [tetris] Final score: 233
+Lost due to: BlockOut(Position { x: 3, y: 20 })
+2020-04-20 20:39:22,247 INFO [tetris] Final score: 275
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-20 20:39:23,255 INFO [tetris] Final score: 188
+Lost due to: BlockOut(Position { x: 3, y: 20 })
+2020-04-20 20:39:24,327 INFO [tetris] Final score: 251
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 20:39:24,983 INFO [tetris] Final score: 219
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 20:39:26,103 INFO [tetris] Final score: 214
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 20:39:26,599 INFO [tetris] Final score: 200
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-20 20:39:27,975 INFO [tetris] Final score: 252
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 20:39:28,615 INFO [tetris] Final score: 182
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 20:39:29,543 INFO [tetris] Final score: 201
diff --git a/qlearning-results/a0.5-g0.9-e0.9-qlearning b/qlearning-results/a0.5-g0.9-e0.9-qlearning
new file mode 100644
index 0000000..8d8368d
--- /dev/null
+++ b/qlearning-results/a0.5-g0.9-e0.9-qlearning
@@ -0,0 +1,30 @@
+2020-04-21 01:01:19,704 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 0.9, exploration_rate = 0.9
+224.0625500000003
+223.82470000000038
+223.50610000000273
+223.70925000000082
+223.13070000000013
+223.37880000000027
+223.02480000000185
+223.8222000000021
+222.90475000000066
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 01:08:27,931 INFO [tetris] Final score: 257
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 01:08:29,132 INFO [tetris] Final score: 182
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 01:08:30,620 INFO [tetris] Final score: 241
+Lost due to: BlockOut(Position { x: 3, y: 19 })
+2020-04-21 01:08:31,435 INFO [tetris] Final score: 189
+Lost due to: BlockOut(Position { x: 3, y: 19 })
+2020-04-21 01:08:32,220 INFO [tetris] Final score: 231
+Lost due to: BlockOut(Position { x: 3, y: 20 })
+2020-04-21 01:08:32,940 INFO [tetris] Final score: 167
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-21 01:08:34,684 INFO [tetris] Final score: 265
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 01:08:35,660 INFO [tetris] Final score: 172
+Lost due to: BlockOut(Position { x: 3, y: 20 })
+2020-04-21 01:08:37,052 INFO [tetris] Final score: 211
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 01:08:38,620 INFO [tetris] Final score: 208
diff --git a/qlearning-results/a0.5-g1.0-e0.1-approximateqlearning b/qlearning-results/a0.5-g1.0-e0.1-approximateqlearning
new file mode 100644
index 0000000..0b7a527
--- /dev/null
+++ b/qlearning-results/a0.5-g1.0-e0.1-approximateqlearning
@@ -0,0 +1,30 @@
+2020-04-20 19:56:40,713 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 1, exploration_rate = 0.1
+314.89899999999966
+379.1089999999996
+374.69950000000034
+374.3954999999996
+372.0515000000002
+374.66099999999955
+372.2300000000005
+375.03550000000064
+373.0990000000006
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 20:18:46,365 INFO [tetris] Final score: 366
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-20 20:19:16,541 INFO [tetris] Final score: 423
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 20:19:59,453 INFO [tetris] Final score: 553
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-20 20:20:17,773 INFO [tetris] Final score: 309
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 20:20:43,677 INFO [tetris] Final score: 362
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-20 20:21:15,101 INFO [tetris] Final score: 353
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 20:21:44,413 INFO [tetris] Final score: 413
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 20:22:10,925 INFO [tetris] Final score: 449
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 20:22:29,901 INFO [tetris] Final score: 339
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-20 20:22:56,925 INFO [tetris] Final score: 372
diff --git a/qlearning-results/a0.5-g1.0-e0.1-qlearning b/qlearning-results/a0.5-g1.0-e0.1-qlearning
new file mode 100644
index 0000000..c19f6c8
--- /dev/null
+++ b/qlearning-results/a0.5-g1.0-e0.1-qlearning
@@ -0,0 +1,30 @@
+2020-04-20 23:35:57,164 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 1, exploration_rate = 0.1
+217.07010000000116
+215.48750000000126
+213.89315000000127
+213.9104000000016
+213.30205000000078
+213.07895000000104
+213.82985000000173
+214.11780000000059
+214.23375000000158
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 23:49:41,844 INFO [tetris] Final score: 265
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 23:49:44,852 INFO [tetris] Final score: 210
+Lost due to: BlockOut(Position { x: 3, y: 19 })
+2020-04-20 23:49:46,660 INFO [tetris] Final score: 249
+Lost due to: BlockOut(Position { x: 3, y: 20 })
+2020-04-20 23:49:47,764 INFO [tetris] Final score: 185
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 23:49:49,012 INFO [tetris] Final score: 216
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-20 23:49:49,892 INFO [tetris] Final score: 218
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 23:49:50,836 INFO [tetris] Final score: 156
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 23:49:52,692 INFO [tetris] Final score: 204
+Lost due to: BlockOut(Position { x: 3, y: 19 })
+2020-04-20 23:49:53,764 INFO [tetris] Final score: 178
+Lost due to: BlockOut(Position { x: 5, y: 19 })
+2020-04-20 23:49:55,908 INFO [tetris] Final score: 245
diff --git a/qlearning-results/a0.5-g1.0-e0.25-approximateqlearning b/qlearning-results/a0.5-g1.0-e0.25-approximateqlearning
new file mode 100644
index 0000000..c0d39f4
--- /dev/null
+++ b/qlearning-results/a0.5-g1.0-e0.25-approximateqlearning
@@ -0,0 +1,30 @@
+2020-04-20 20:22:56,937 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 1, exploration_rate = 0.25
+265.472
+301.7995000000003
+300.2394999999993
+297.31350000000015
+296.8614999999998
+298.42900000000003
+298.0585000000003
+298.2465
+297.685
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 20:29:27,245 INFO [tetris] Final score: 377
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 20:29:36,141 INFO [tetris] Final score: 256
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 20:29:47,436 INFO [tetris] Final score: 327
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 20:29:51,980 INFO [tetris] Final score: 241
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 20:30:01,820 INFO [tetris] Final score: 312
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 20:30:10,029 INFO [tetris] Final score: 376
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-20 20:30:19,100 INFO [tetris] Final score: 326
+Lost due to: BlockOut(Position { x: 3, y: 20 })
+2020-04-20 20:30:24,572 INFO [tetris] Final score: 258
+Lost due to: BlockOut(Position { x: 5, y: 19 })
+2020-04-20 20:30:31,052 INFO [tetris] Final score: 290
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 20:30:37,228 INFO [tetris] Final score: 285
diff --git a/qlearning-results/a0.5-g1.0-e0.25-qlearning b/qlearning-results/a0.5-g1.0-e0.25-qlearning
new file mode 100644
index 0000000..9a480c1
--- /dev/null
+++ b/qlearning-results/a0.5-g1.0-e0.25-qlearning
@@ -0,0 +1,30 @@
+2020-04-20 23:50:10,585 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 1, exploration_rate = 0.25
+220.29590000000104
+218.86105000000103
+218.401800000002
+217.84359999999987
+217.01880000000187
+216.45335000000077
+216.29710000000202
+217.15850000000142
+216.41589999999925
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 00:01:39,136 INFO [tetris] Final score: 159
+Lost due to: BlockOut(Position { x: 3, y: 19 })
+2020-04-21 00:01:40,416 INFO [tetris] Final score: 237
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 00:01:43,104 INFO [tetris] Final score: 355
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 00:01:44,608 INFO [tetris] Final score: 137
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 00:01:46,464 INFO [tetris] Final score: 169
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 00:01:48,399 INFO [tetris] Final score: 200
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-21 00:01:50,416 INFO [tetris] Final score: 228
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 00:01:52,288 INFO [tetris] Final score: 231
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 00:01:54,096 INFO [tetris] Final score: 202
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 00:01:55,104 INFO [tetris] Final score: 154
diff --git a/qlearning-results/a0.5-g1.0-e0.5-approximateqlearning b/qlearning-results/a0.5-g1.0-e0.5-approximateqlearning
new file mode 100644
index 0000000..97543a6
--- /dev/null
+++ b/qlearning-results/a0.5-g1.0-e0.5-approximateqlearning
@@ -0,0 +1,30 @@
+2020-04-20 20:30:37,235 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 1, exploration_rate = 0.5
+241.37399999999965
+257.31149999999957
+257.2300000000001
+257.35450000000003
+256.73199999999986
+255.0735
+256.7804999999999
+256.34699999999964
+256.6519999999999
+Lost due to: BlockOut(Position { x: 3, y: 20 })
+2020-04-20 20:33:06,085 INFO [tetris] Final score: 341
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-20 20:33:07,556 INFO [tetris] Final score: 253
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 20:33:08,980 INFO [tetris] Final score: 175
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 20:33:12,164 INFO [tetris] Final score: 243
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 20:33:13,732 INFO [tetris] Final score: 225
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 20:33:17,364 INFO [tetris] Final score: 236
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 20:33:20,340 INFO [tetris] Final score: 252
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 20:33:23,124 INFO [tetris] Final score: 173
+Lost due to: BlockOut(Position { x: 3, y: 20 })
+2020-04-20 20:33:25,748 INFO [tetris] Final score: 189
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 20:33:29,860 INFO [tetris] Final score: 303
diff --git a/qlearning-results/a0.5-g1.0-e0.5-qlearning b/qlearning-results/a0.5-g1.0-e0.5-qlearning
new file mode 100644
index 0000000..9158afd
--- /dev/null
+++ b/qlearning-results/a0.5-g1.0-e0.5-qlearning
@@ -0,0 +1,30 @@
+2020-04-21 00:02:10,694 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 1, exploration_rate = 0.5
+222.3642500000013
+219.94975000000082
+219.3147500000007
+219.1652000000013
+218.8023500000009
+218.44645000000153
+218.14265000000276
+219.10639999999998
+218.61155000000076
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 00:11:43,071 INFO [tetris] Final score: 229
+Lost due to: BlockOut(Position { x: 3, y: 20 })
+2020-04-21 00:11:43,903 INFO [tetris] Final score: 194
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 00:11:44,639 INFO [tetris] Final score: 172
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 00:11:46,655 INFO [tetris] Final score: 260
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 00:11:47,824 INFO [tetris] Final score: 184
+Lost due to: BlockOut(Position { x: 5, y: 19 })
+2020-04-21 00:11:48,559 INFO [tetris] Final score: 204
+Lost due to: BlockOut(Position { x: 3, y: 20 })
+2020-04-21 00:11:49,679 INFO [tetris] Final score: 218
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 00:11:50,399 INFO [tetris] Final score: 178
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 00:11:51,999 INFO [tetris] Final score: 251
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 00:11:53,728 INFO [tetris] Final score: 234
diff --git a/qlearning-results/a0.5-g1.0-e0.75-approximateqlearning b/qlearning-results/a0.5-g1.0-e0.75-approximateqlearning
new file mode 100644
index 0000000..a09bdec
--- /dev/null
+++ b/qlearning-results/a0.5-g1.0-e0.75-approximateqlearning
@@ -0,0 +1,30 @@
+2020-04-20 20:33:29,865 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 1, exploration_rate = 0.75
+232.42399999999986
+237.16400000000016
+235.45699999999982
+237.99450000000027
+236.0200000000004
+235.28949999999986
+237.74450000000007
+236.42199999999983
+238.13749999999965
+Lost due to: BlockOut(Position { x: 5, y: 19 })
+2020-04-20 20:34:53,785 INFO [tetris] Final score: 225
+Lost due to: BlockOut(Position { x: 5, y: 19 })
+2020-04-20 20:34:56,057 INFO [tetris] Final score: 244
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 20:34:58,265 INFO [tetris] Final score: 184
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-20 20:35:00,824 INFO [tetris] Final score: 336
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 20:35:02,937 INFO [tetris] Final score: 222
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 20:35:05,465 INFO [tetris] Final score: 297
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 20:35:07,208 INFO [tetris] Final score: 207
+Lost due to: BlockOut(Position { x: 4, y: 19 })
+2020-04-20 20:35:08,297 INFO [tetris] Final score: 172
+Lost due to: LockOut
+2020-04-20 20:35:10,873 INFO [tetris] Final score: 193
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 20:35:12,489 INFO [tetris] Final score: 270
diff --git a/qlearning-results/a0.5-g1.0-e0.75-qlearning b/qlearning-results/a0.5-g1.0-e0.75-qlearning
new file mode 100644
index 0000000..4f69ce0
--- /dev/null
+++ b/qlearning-results/a0.5-g1.0-e0.75-qlearning
@@ -0,0 +1,30 @@
+2020-04-21 00:12:10,114 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 1, exploration_rate = 0.75
+222.72695000000047
+221.92535000000115
+221.42015000000058
+221.31975000000068
+221.83170000000075
+221.0318000000008
+220.87970000000115
+221.33515000000043
+220.92910000000137
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 00:20:16,781 INFO [tetris] Final score: 292
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 00:20:18,157 INFO [tetris] Final score: 240
+Lost due to: BlockOut(Position { x: 4, y: 19 })
+2020-04-21 00:20:19,149 INFO [tetris] Final score: 182
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 00:20:20,957 INFO [tetris] Final score: 226
+Lost due to: BlockOut(Position { x: 3, y: 19 })
+2020-04-21 00:20:22,126 INFO [tetris] Final score: 262
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 00:20:22,973 INFO [tetris] Final score: 179
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 00:20:24,318 INFO [tetris] Final score: 182
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 00:20:25,342 INFO [tetris] Final score: 173
+Lost due to: BlockOut(Position { x: 5, y: 19 })
+2020-04-21 00:20:27,101 INFO [tetris] Final score: 280
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 00:20:27,998 INFO [tetris] Final score: 190
diff --git a/qlearning-results/a0.5-g1.0-e0.9-approximateqlearning b/qlearning-results/a0.5-g1.0-e0.9-approximateqlearning
new file mode 100644
index 0000000..58b6e4f
--- /dev/null
+++ b/qlearning-results/a0.5-g1.0-e0.9-approximateqlearning
@@ -0,0 +1,30 @@
+2020-04-20 20:35:12,493 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 1, exploration_rate = 0.9
+227.1795000000003
+227.45000000000027
+227.78050000000044
+227.90849999999978
+227.35900000000012
+226.63799999999975
+226.39900000000037
+228.26449999999986
+226.9465000000005
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 20:36:15,916 INFO [tetris] Final score: 163
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 20:36:16,845 INFO [tetris] Final score: 237
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 20:36:18,284 INFO [tetris] Final score: 183
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-20 20:36:19,820 INFO [tetris] Final score: 220
+Lost due to: BlockOut(Position { x: 4, y: 19 })
+2020-04-20 20:36:20,908 INFO [tetris] Final score: 180
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-20 20:36:22,652 INFO [tetris] Final score: 252
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 20:36:24,333 INFO [tetris] Final score: 200
+Lost due to: BlockOut(Position { x: 3, y: 19 })
+2020-04-20 20:36:25,084 INFO [tetris] Final score: 191
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 20:36:26,684 INFO [tetris] Final score: 219
+Lost due to: BlockOut(Position { x: 3, y: 20 })
+2020-04-20 20:36:28,124 INFO [tetris] Final score: 237
diff --git a/qlearning-results/a0.5-g1.0-e0.9-qlearning b/qlearning-results/a0.5-g1.0-e0.9-qlearning
new file mode 100644
index 0000000..89d6c39
--- /dev/null
+++ b/qlearning-results/a0.5-g1.0-e0.9-qlearning
@@ -0,0 +1,30 @@
+2020-04-21 00:20:45,202 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 1, exploration_rate = 0.9
+223.9953500000022
+222.9694500000008
+223.12235000000067
+222.78095000000044
+223.28720000000112
+223.08990000000207
+222.52310000000165
+222.5425500000013
+222.57469999999955
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 00:28:05,607 INFO [tetris] Final score: 208
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 00:28:06,796 INFO [tetris] Final score: 184
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 00:28:08,428 INFO [tetris] Final score: 198
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 00:28:10,011 INFO [tetris] Final score: 248
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 00:28:11,772 INFO [tetris] Final score: 218
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 00:28:13,068 INFO [tetris] Final score: 207
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 00:28:14,108 INFO [tetris] Final score: 176
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 00:28:15,691 INFO [tetris] Final score: 218
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 00:28:17,244 INFO [tetris] Final score: 205
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 00:28:18,412 INFO [tetris] Final score: 205
diff --git a/qlearning-results/a0.7-g0.1-e0.1-approximateqlearning b/qlearning-results/a0.7-g0.1-e0.1-approximateqlearning
new file mode 100644
index 0000000..4d43600
--- /dev/null
+++ b/qlearning-results/a0.7-g0.1-e0.1-approximateqlearning
@@ -0,0 +1,30 @@
+2020-04-20 21:31:56,907 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 0.1, exploration_rate = 0.1
+205.06799999999987
+203.4910000000006
+203.63700000000023
+204.9025000000004
+205.20100000000005
+203.47650000000007
+203.90649999999988
+204.76350000000016
+203.32050000000007
+Lost due to: BlockOut(Position { x: 5, y: 19 })
+2020-04-20 21:32:27,963 INFO [tetris] Final score: 153
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-20 21:32:28,316 INFO [tetris] Final score: 195
+Lost due to: BlockOut(Position { x: 4, y: 19 })
+2020-04-20 21:32:28,556 INFO [tetris] Final score: 174
+Lost due to: BlockOut(Position { x: 4, y: 19 })
+2020-04-20 21:32:28,844 INFO [tetris] Final score: 164
+Lost due to: BlockOut(Position { x: 3, y: 20 })
+2020-04-20 21:32:29,100 INFO [tetris] Final score: 171
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:32:29,500 INFO [tetris] Final score: 242
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:32:29,851 INFO [tetris] Final score: 214
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:32:30,043 INFO [tetris] Final score: 176
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:32:30,267 INFO [tetris] Final score: 153
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:32:30,508 INFO [tetris] Final score: 195
diff --git a/qlearning-results/a0.7-g0.1-e0.1-qlearning b/qlearning-results/a0.7-g0.1-e0.1-qlearning
new file mode 100644
index 0000000..ab3d4d4
--- /dev/null
+++ b/qlearning-results/a0.7-g0.1-e0.1-qlearning
@@ -0,0 +1,30 @@
+2020-04-21 04:45:27,198 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 0.1, exploration_rate = 0.1
+220.9134500000001
+218.46825000000183
+216.96985000000112
+216.49105000000256
+216.20370000000025
+215.66090000000148
+216.01450000000276
+215.32430000000122
+215.499950000001
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 04:53:07,450 INFO [tetris] Final score: 237
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 04:53:08,489 INFO [tetris] Final score: 255
+Lost due to: BlockOut(Position { x: 4, y: 19 })
+2020-04-21 04:53:09,145 INFO [tetris] Final score: 188
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 04:53:10,713 INFO [tetris] Final score: 265
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 04:53:11,194 INFO [tetris] Final score: 206
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 04:53:12,122 INFO [tetris] Final score: 211
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 04:53:12,489 INFO [tetris] Final score: 215
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 04:53:13,386 INFO [tetris] Final score: 202
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 04:53:14,090 INFO [tetris] Final score: 196
+Lost due to: BlockOut(Position { x: 4, y: 19 })
+2020-04-21 04:53:14,650 INFO [tetris] Final score: 200
diff --git a/qlearning-results/a0.7-g0.1-e0.25-approximateqlearning b/qlearning-results/a0.7-g0.1-e0.25-approximateqlearning
new file mode 100644
index 0000000..6c913bf
--- /dev/null
+++ b/qlearning-results/a0.7-g0.1-e0.25-approximateqlearning
@@ -0,0 +1,30 @@
+2020-04-20 21:32:30,522 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 0.1, exploration_rate = 0.25
+197.7265
+196.07349999999977
+195.49700000000016
+197.3180000000003
+196.854
+197.9659999999997
+196.6815000000002
+197.66900000000015
+196.31300000000013
+Lost due to: BlockOut(Position { x: 3, y: 20 })
+2020-04-20 21:32:56,999 INFO [tetris] Final score: 198
+Lost due to: BlockOut(Position { x: 4, y: 19 })
+2020-04-20 21:32:57,175 INFO [tetris] Final score: 168
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:32:57,543 INFO [tetris] Final score: 202
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:32:57,783 INFO [tetris] Final score: 184
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:32:58,007 INFO [tetris] Final score: 202
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:32:58,375 INFO [tetris] Final score: 218
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:32:58,759 INFO [tetris] Final score: 172
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:32:59,191 INFO [tetris] Final score: 209
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:32:59,495 INFO [tetris] Final score: 139
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:32:59,735 INFO [tetris] Final score: 202
diff --git a/qlearning-results/a0.7-g0.1-e0.25-qlearning b/qlearning-results/a0.7-g0.1-e0.25-qlearning
new file mode 100644
index 0000000..87ba602
--- /dev/null
+++ b/qlearning-results/a0.7-g0.1-e0.25-qlearning
@@ -0,0 +1,30 @@
+2020-04-21 04:53:26,134 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 0.1, exploration_rate = 0.25
+221.59775000000022
+219.6475500000023
+218.40525000000093
+217.8659000000026
+217.86260000000172
+217.56835000000152
+216.838600000002
+216.65040000000275
+216.9875500000018
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 05:01:42,328 INFO [tetris] Final score: 235
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 05:01:44,536 INFO [tetris] Final score: 279
+Lost due to: BlockOut(Position { x: 3, y: 20 })
+2020-04-21 05:01:45,352 INFO [tetris] Final score: 207
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 05:01:46,456 INFO [tetris] Final score: 236
+Lost due to: BlockOut(Position { x: 4, y: 19 })
+2020-04-21 05:01:47,192 INFO [tetris] Final score: 185
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 05:01:47,928 INFO [tetris] Final score: 223
+Lost due to: BlockOut(Position { x: 3, y: 20 })
+2020-04-21 05:01:49,208 INFO [tetris] Final score: 208
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 05:01:50,120 INFO [tetris] Final score: 209
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 05:01:51,320 INFO [tetris] Final score: 199
+Lost due to: BlockOut(Position { x: 5, y: 19 })
+2020-04-21 05:01:53,144 INFO [tetris] Final score: 265
diff --git a/qlearning-results/a0.7-g0.1-e0.5-approximateqlearning b/qlearning-results/a0.7-g0.1-e0.5-approximateqlearning
new file mode 100644
index 0000000..47ea943
--- /dev/null
+++ b/qlearning-results/a0.7-g0.1-e0.5-approximateqlearning
@@ -0,0 +1,30 @@
+2020-04-20 21:32:59,746 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 0.1, exploration_rate = 0.5
+193.10700000000023
+192.20799999999977
+192.60200000000037
+193.61800000000002
+193.59550000000024
+193.2725000000001
+194.71150000000011
+193.1584999999996
+193.34999999999997
+Lost due to: BlockOut(Position { x: 4, y: 19 })
+2020-04-20 21:33:27,856 INFO [tetris] Final score: 226
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:33:28,224 INFO [tetris] Final score: 181
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:33:28,785 INFO [tetris] Final score: 218
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-20 21:33:29,744 INFO [tetris] Final score: 243
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:33:30,081 INFO [tetris] Final score: 179
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-20 21:33:30,816 INFO [tetris] Final score: 151
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:33:31,232 INFO [tetris] Final score: 157
+Lost due to: BlockOut(Position { x: 4, y: 19 })
+2020-04-20 21:33:31,584 INFO [tetris] Final score: 198
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:33:32,000 INFO [tetris] Final score: 187
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:33:32,992 INFO [tetris] Final score: 214
diff --git a/qlearning-results/a0.7-g0.1-e0.5-qlearning b/qlearning-results/a0.7-g0.1-e0.5-qlearning
new file mode 100644
index 0000000..32bced8
--- /dev/null
+++ b/qlearning-results/a0.7-g0.1-e0.5-qlearning
@@ -0,0 +1,30 @@
+2020-04-21 05:02:06,528 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 0.1, exploration_rate = 0.5
+222.32865000000132
+220.41344999999964
+219.4295000000009
+219.03665000000092
+218.2892000000019
+218.8974500000003
+218.19365000000175
+218.02370000000064
+217.78890000000004
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 05:10:03,098 INFO [tetris] Final score: 189
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 05:10:04,058 INFO [tetris] Final score: 216
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 05:10:05,226 INFO [tetris] Final score: 226
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 05:10:07,002 INFO [tetris] Final score: 219
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 05:10:08,025 INFO [tetris] Final score: 259
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-21 05:10:09,129 INFO [tetris] Final score: 161
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 05:10:10,170 INFO [tetris] Final score: 237
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 05:10:11,321 INFO [tetris] Final score: 206
+Lost due to: LockOut
+2020-04-21 05:10:12,090 INFO [tetris] Final score: 214
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-21 05:10:12,905 INFO [tetris] Final score: 173
diff --git a/qlearning-results/a0.7-g0.1-e0.75-approximateqlearning b/qlearning-results/a0.7-g0.1-e0.75-approximateqlearning
new file mode 100644
index 0000000..5dbabd1
--- /dev/null
+++ b/qlearning-results/a0.7-g0.1-e0.75-approximateqlearning
@@ -0,0 +1,30 @@
+2020-04-20 21:33:33,005 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 0.1, exploration_rate = 0.75
+198.2490000000002
+199.33100000000022
+198.01250000000007
+199.5534999999999
+199.09949999999992
+199.26700000000008
+199.94449999999983
+199.19699999999986
+198.18700000000027
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:34:06,201 INFO [tetris] Final score: 129
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:34:07,081 INFO [tetris] Final score: 175
+Lost due to: BlockOut(Position { x: 3, y: 20 })
+2020-04-20 21:34:07,800 INFO [tetris] Final score: 222
+Lost due to: BlockOut(Position { x: 4, y: 19 })
+2020-04-20 21:34:08,232 INFO [tetris] Final score: 192
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:34:08,617 INFO [tetris] Final score: 168
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:34:09,129 INFO [tetris] Final score: 186
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:34:09,641 INFO [tetris] Final score: 197
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:34:10,217 INFO [tetris] Final score: 190
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:34:10,985 INFO [tetris] Final score: 222
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:34:11,305 INFO [tetris] Final score: 189
diff --git a/qlearning-results/a0.7-g0.1-e0.75-qlearning b/qlearning-results/a0.7-g0.1-e0.75-qlearning
new file mode 100644
index 0000000..5b1ead6
--- /dev/null
+++ b/qlearning-results/a0.7-g0.1-e0.75-qlearning
@@ -0,0 +1,30 @@
+2020-04-21 05:10:28,038 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 0.1, exploration_rate = 0.75
+222.84460000000047
+221.21740000000136
+220.92145000000144
+220.32520000000156
+220.87345000000195
+219.41920000000152
+220.1004000000004
+219.86515000000256
+219.05949999999996
+Lost due to: BlockOut(Position { x: 4, y: 19 })
+2020-04-21 05:17:50,430 INFO [tetris] Final score: 233
+Lost due to: BlockOut(Position { x: 3, y: 20 })
+2020-04-21 05:17:51,726 INFO [tetris] Final score: 217
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 05:17:52,382 INFO [tetris] Final score: 176
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 05:17:54,894 INFO [tetris] Final score: 271
+Lost due to: BlockOut(Position { x: 3, y: 19 })
+2020-04-21 05:17:57,565 INFO [tetris] Final score: 290
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 05:17:59,342 INFO [tetris] Final score: 243
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 05:18:00,670 INFO [tetris] Final score: 240
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 05:18:01,470 INFO [tetris] Final score: 206
+Lost due to: BlockOut(Position { x: 3, y: 19 })
+2020-04-21 05:18:02,766 INFO [tetris] Final score: 171
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-21 05:18:03,534 INFO [tetris] Final score: 191
diff --git a/qlearning-results/a0.7-g0.1-e0.9-approximateqlearning b/qlearning-results/a0.7-g0.1-e0.9-approximateqlearning
new file mode 100644
index 0000000..dab050a
--- /dev/null
+++ b/qlearning-results/a0.7-g0.1-e0.9-approximateqlearning
@@ -0,0 +1,30 @@
+2020-04-20 21:34:11,321 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 0.1, exploration_rate = 0.9
+208.7134999999998
+209.66350000000003
+208.55499999999992
+208.98049999999984
+209.2109999999997
+210.35750000000024
+208.9709999999997
+210.14850000000013
+209.65449999999998
+Lost due to: LockOut
+2020-04-20 21:34:53,086 INFO [tetris] Final score: 211
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:34:54,175 INFO [tetris] Final score: 200
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:34:55,295 INFO [tetris] Final score: 224
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:34:56,110 INFO [tetris] Final score: 173
+Lost due to: BlockOut(Position { x: 4, y: 19 })
+2020-04-20 21:34:57,598 INFO [tetris] Final score: 289
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-20 21:34:58,879 INFO [tetris] Final score: 253
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-20 21:34:59,310 INFO [tetris] Final score: 182
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:35:00,270 INFO [tetris] Final score: 202
+Lost due to: BlockOut(Position { x: 3, y: 20 })
+2020-04-20 21:35:01,215 INFO [tetris] Final score: 247
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-20 21:35:02,959 INFO [tetris] Final score: 280
diff --git a/qlearning-results/a0.7-g0.1-e0.9-qlearning b/qlearning-results/a0.7-g0.1-e0.9-qlearning
new file mode 100644
index 0000000..7e1a65c
--- /dev/null
+++ b/qlearning-results/a0.7-g0.1-e0.9-qlearning
@@ -0,0 +1,30 @@
+2020-04-21 05:18:20,073 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 0.1, exploration_rate = 0.9
+223.62495000000055
+222.95280000000224
+222.44615000000033
+222.4190999999997
+222.1059500000012
+222.01755000000145
+221.85240000000096
+221.5929000000017
+221.48355000000163
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 05:25:20,161 INFO [tetris] Final score: 265
+Lost due to: BlockOut(Position { x: 3, y: 20 })
+2020-04-21 05:25:21,312 INFO [tetris] Final score: 242
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 05:25:22,865 INFO [tetris] Final score: 250
+Lost due to: BlockOut(Position { x: 3, y: 20 })
+2020-04-21 05:25:24,609 INFO [tetris] Final score: 240
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 05:25:26,401 INFO [tetris] Final score: 214
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-21 05:25:27,985 INFO [tetris] Final score: 229
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 05:25:28,769 INFO [tetris] Final score: 198
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 05:25:29,617 INFO [tetris] Final score: 221
+Lost due to: BlockOut(Position { x: 3, y: 20 })
+2020-04-21 05:25:31,105 INFO [tetris] Final score: 213
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 05:25:31,969 INFO [tetris] Final score: 240
diff --git a/qlearning-results/a0.7-g0.5-e0.1-approximateqlearning b/qlearning-results/a0.7-g0.5-e0.1-approximateqlearning
new file mode 100644
index 0000000..d4ef582
--- /dev/null
+++ b/qlearning-results/a0.7-g0.5-e0.1-approximateqlearning
@@ -0,0 +1,30 @@
+2020-04-20 21:29:01,106 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 0.5, exploration_rate = 0.1
+203.66950000000043
+203.75650000000047
+203.91750000000053
+204.8320000000005
+204.38150000000022
+204.16349999999997
+204.26450000000017
+205.90300000000045
+204.63100000000037
+Lost due to: BlockOut(Position { x: 3, y: 19 })
+2020-04-20 21:29:28,137 INFO [tetris] Final score: 196
+Lost due to: BlockOut(Position { x: 5, y: 19 })
+2020-04-20 21:29:28,377 INFO [tetris] Final score: 196
+Lost due to: BlockOut(Position { x: 3, y: 20 })
+2020-04-20 21:29:28,601 INFO [tetris] Final score: 206
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:29:28,777 INFO [tetris] Final score: 202
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:29:29,017 INFO [tetris] Final score: 200
+Lost due to: LockOut
+2020-04-20 21:29:29,225 INFO [tetris] Final score: 204
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:29:29,465 INFO [tetris] Final score: 198
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:29:29,641 INFO [tetris] Final score: 172
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:29:29,848 INFO [tetris] Final score: 192
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:29:30,041 INFO [tetris] Final score: 194
diff --git a/qlearning-results/a0.7-g0.5-e0.1-qlearning b/qlearning-results/a0.7-g0.5-e0.1-qlearning
new file mode 100644
index 0000000..880869f
--- /dev/null
+++ b/qlearning-results/a0.7-g0.5-e0.1-qlearning
@@ -0,0 +1,30 @@
+2020-04-21 04:05:16,119 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 0.5, exploration_rate = 0.1
+221.2680000000025
+218.37540000000104
+217.10060000000163
+216.2829500000021
+215.81070000000227
+215.82075000000367
+215.77970000000266
+215.58015000000194
+215.40985000000137
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 04:12:53,680 INFO [tetris] Final score: 209
+Lost due to: BlockOut(Position { x: 4, y: 19 })
+2020-04-21 04:12:54,528 INFO [tetris] Final score: 201
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 04:12:55,760 INFO [tetris] Final score: 228
+Lost due to: BlockOut(Position { x: 3, y: 20 })
+2020-04-21 04:12:56,448 INFO [tetris] Final score: 200
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-21 04:12:56,896 INFO [tetris] Final score: 218
+Lost due to: BlockOut(Position { x: 6, y: 19 })
+2020-04-21 04:12:57,792 INFO [tetris] Final score: 210
+Lost due to: BlockOut(Position { x: 4, y: 19 })
+2020-04-21 04:12:58,640 INFO [tetris] Final score: 215
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 04:12:59,120 INFO [tetris] Final score: 199
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-21 04:12:59,856 INFO [tetris] Final score: 198
+Lost due to: BlockOut(Position { x: 3, y: 20 })
+2020-04-21 04:13:00,336 INFO [tetris] Final score: 227
diff --git a/qlearning-results/a0.7-g0.5-e0.25-approximateqlearning b/qlearning-results/a0.7-g0.5-e0.25-approximateqlearning
new file mode 100644
index 0000000..076e60c
--- /dev/null
+++ b/qlearning-results/a0.7-g0.5-e0.25-approximateqlearning
@@ -0,0 +1,30 @@
+2020-04-20 21:29:30,056 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 0.5, exploration_rate = 0.25
+198.48499999999999
+197.94750000000002
+197.9815000000003
+197.8610000000002
+198.40700000000004
+198.67700000000053
+198.43300000000016
+197.6905
+197.90250000000006
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:29:54,727 INFO [tetris] Final score: 178
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:29:55,382 INFO [tetris] Final score: 219
+Lost due to: BlockOut(Position { x: 4, y: 19 })
+2020-04-20 21:29:55,607 INFO [tetris] Final score: 200
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:29:55,830 INFO [tetris] Final score: 182
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:29:56,006 INFO [tetris] Final score: 196
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:29:56,231 INFO [tetris] Final score: 200
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:29:56,486 INFO [tetris] Final score: 216
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:29:56,806 INFO [tetris] Final score: 231
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:29:56,998 INFO [tetris] Final score: 184
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:29:57,222 INFO [tetris] Final score: 181
diff --git a/qlearning-results/a0.7-g0.5-e0.25-qlearning b/qlearning-results/a0.7-g0.5-e0.25-qlearning
new file mode 100644
index 0000000..10a7508
--- /dev/null
+++ b/qlearning-results/a0.7-g0.5-e0.25-qlearning
@@ -0,0 +1,30 @@
+2020-04-21 04:13:11,811 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 0.5, exploration_rate = 0.25
+221.77135000000226
+219.93090000000265
+218.68195000000202
+218.32155000000182
+217.83570000000154
+217.31070000000253
+217.6305000000007
+217.02715000000006
+216.85685000000282
+Lost due to: BlockOut(Position { x: 3, y: 20 })
+2020-04-21 04:21:24,555 INFO [tetris] Final score: 198
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 04:21:25,467 INFO [tetris] Final score: 214
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 04:21:25,852 INFO [tetris] Final score: 195
+Lost due to: LockOut
+2020-04-21 04:21:26,667 INFO [tetris] Final score: 202
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 04:21:28,284 INFO [tetris] Final score: 247
+Lost due to: BlockOut(Position { x: 5, y: 19 })
+2020-04-21 04:21:30,300 INFO [tetris] Final score: 237
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 04:21:30,908 INFO [tetris] Final score: 207
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 04:21:31,947 INFO [tetris] Final score: 211
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 04:21:33,420 INFO [tetris] Final score: 211
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 04:21:34,139 INFO [tetris] Final score: 212
diff --git a/qlearning-results/a0.7-g0.5-e0.5-approximateqlearning b/qlearning-results/a0.7-g0.5-e0.5-approximateqlearning
new file mode 100644
index 0000000..4c499c7
--- /dev/null
+++ b/qlearning-results/a0.7-g0.5-e0.5-approximateqlearning
@@ -0,0 +1,30 @@
+2020-04-20 21:29:57,236 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 0.5, exploration_rate = 0.5
+194.04349999999994
+193.6319999999999
+194.0960000000002
+193.00299999999976
+193.12700000000044
+193.7154999999996
+192.96799999999976
+193.61299999999963
+193.4725000000004
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:30:23,417 INFO [tetris] Final score: 215
+Lost due to: BlockOut(Position { x: 5, y: 19 })
+2020-04-20 21:30:23,865 INFO [tetris] Final score: 137
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:30:24,121 INFO [tetris] Final score: 152
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:30:24,857 INFO [tetris] Final score: 265
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:30:25,145 INFO [tetris] Final score: 169
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:30:25,577 INFO [tetris] Final score: 200
+Lost due to: BlockOut(Position { x: 4, y: 19 })
+2020-04-20 21:30:25,817 INFO [tetris] Final score: 169
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:30:26,377 INFO [tetris] Final score: 252
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:30:27,065 INFO [tetris] Final score: 185
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:30:27,417 INFO [tetris] Final score: 190
diff --git a/qlearning-results/a0.7-g0.5-e0.5-qlearning b/qlearning-results/a0.7-g0.5-e0.5-qlearning
new file mode 100644
index 0000000..9f993a8
--- /dev/null
+++ b/qlearning-results/a0.7-g0.5-e0.5-qlearning
@@ -0,0 +1,30 @@
+2020-04-21 04:21:47,620 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 0.5, exploration_rate = 0.5
+221.78365000000016
+220.36190000000127
+219.6699999999998
+218.93470000000156
+218.8314000000004
+218.64460000000062
+218.135150000001
+218.07535000000257
+217.76015000000226
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 04:29:43,560 INFO [tetris] Final score: 221
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 04:29:45,224 INFO [tetris] Final score: 245
+Lost due to: BlockOut(Position { x: 3, y: 20 })
+2020-04-21 04:29:46,376 INFO [tetris] Final score: 215
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 04:29:47,495 INFO [tetris] Final score: 201
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 04:29:48,376 INFO [tetris] Final score: 209
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 04:29:49,847 INFO [tetris] Final score: 231
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 04:29:51,688 INFO [tetris] Final score: 225
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 04:29:52,664 INFO [tetris] Final score: 185
+Lost due to: BlockOut(Position { x: 3, y: 19 })
+2020-04-21 04:29:53,704 INFO [tetris] Final score: 178
+Lost due to: BlockOut(Position { x: 3, y: 19 })
+2020-04-21 04:29:54,839 INFO [tetris] Final score: 193
diff --git a/qlearning-results/a0.7-g0.5-e0.75-approximateqlearning b/qlearning-results/a0.7-g0.5-e0.75-approximateqlearning
new file mode 100644
index 0000000..bf81d81
--- /dev/null
+++ b/qlearning-results/a0.7-g0.5-e0.75-approximateqlearning
@@ -0,0 +1,30 @@
+2020-04-20 21:30:27,427 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 0.5, exploration_rate = 0.75
+197.35000000000025
+197.59800000000004
+199.0745000000005
+198.75000000000014
+198.53600000000026
+198.09599999999978
+199.12850000000057
+200.65399999999983
+199.04100000000022
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:30:59,969 INFO [tetris] Final score: 218
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:31:00,609 INFO [tetris] Final score: 189
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:31:01,281 INFO [tetris] Final score: 205
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-20 21:31:02,033 INFO [tetris] Final score: 172
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-20 21:31:02,689 INFO [tetris] Final score: 192
+Lost due to: BlockOut(Position { x: 3, y: 20 })
+2020-04-20 21:31:03,569 INFO [tetris] Final score: 179
+Lost due to: BlockOut(Position { x: 3, y: 20 })
+2020-04-20 21:31:04,001 INFO [tetris] Final score: 192
+Lost due to: BlockOut(Position { x: 3, y: 19 })
+2020-04-20 21:31:04,289 INFO [tetris] Final score: 136
+Lost due to: BlockOut(Position { x: 5, y: 19 })
+2020-04-20 21:31:04,657 INFO [tetris] Final score: 179
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:31:05,057 INFO [tetris] Final score: 208
diff --git a/qlearning-results/a0.7-g0.5-e0.75-qlearning b/qlearning-results/a0.7-g0.5-e0.75-qlearning
new file mode 100644
index 0000000..d0e90c1
--- /dev/null
+++ b/qlearning-results/a0.7-g0.5-e0.75-qlearning
@@ -0,0 +1,30 @@
+2020-04-21 04:30:09,869 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 0.5, exploration_rate = 0.75
+222.62120000000016
+221.6745000000016
+220.5436500000018
+220.48410000000118
+220.41135000000045
+220.0337500000004
+220.05105000000037
+219.8228000000005
+219.5190500000022
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-21 04:37:31,870 INFO [tetris] Final score: 238
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-21 04:37:33,166 INFO [tetris] Final score: 274
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 04:37:35,022 INFO [tetris] Final score: 234
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 04:37:35,998 INFO [tetris] Final score: 179
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-21 04:37:37,534 INFO [tetris] Final score: 212
+Lost due to: BlockOut(Position { x: 3, y: 19 })
+2020-04-21 04:37:38,829 INFO [tetris] Final score: 243
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 04:37:39,645 INFO [tetris] Final score: 154
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 04:37:40,205 INFO [tetris] Final score: 188
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 04:37:40,990 INFO [tetris] Final score: 160
+Lost due to: BlockOut(Position { x: 3, y: 20 })
+2020-04-21 04:37:42,030 INFO [tetris] Final score: 182
diff --git a/qlearning-results/a0.7-g0.5-e0.9-approximateqlearning b/qlearning-results/a0.7-g0.5-e0.9-approximateqlearning
new file mode 100644
index 0000000..f26ea4d
--- /dev/null
+++ b/qlearning-results/a0.7-g0.5-e0.9-approximateqlearning
@@ -0,0 +1,30 @@
+2020-04-20 21:31:05,073 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 0.5, exploration_rate = 0.9
+209.0025000000003
+208.94150000000002
+207.52350000000015
+208.3560000000002
+208.35900000000015
+209.61550000000045
+208.1925000000006
+207.99900000000036
+207.7845
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-20 21:31:46,015 INFO [tetris] Final score: 239
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:31:47,054 INFO [tetris] Final score: 210
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:31:47,822 INFO [tetris] Final score: 159
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:31:49,214 INFO [tetris] Final score: 252
+Lost due to: BlockOut(Position { x: 3, y: 20 })
+2020-04-20 21:31:50,158 INFO [tetris] Final score: 219
+Lost due to: BlockOut(Position { x: 4, y: 19 })
+2020-04-20 21:31:51,295 INFO [tetris] Final score: 246
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-20 21:31:53,471 INFO [tetris] Final score: 294
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:31:54,703 INFO [tetris] Final score: 306
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:31:55,790 INFO [tetris] Final score: 236
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:31:56,895 INFO [tetris] Final score: 195
diff --git a/qlearning-results/a0.7-g0.5-e0.9-qlearning b/qlearning-results/a0.7-g0.5-e0.9-qlearning
new file mode 100644
index 0000000..40fa1ac
--- /dev/null
+++ b/qlearning-results/a0.7-g0.5-e0.9-qlearning
@@ -0,0 +1,30 @@
+2020-04-21 04:37:58,380 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 0.5, exploration_rate = 0.9
+223.66915000000006
+222.5219500000002
+222.25485000000012
+221.88305000000136
+221.82749999999987
+222.20430000000144
+221.796850000001
+221.66300000000106
+221.49145000000166
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 04:44:58,347 INFO [tetris] Final score: 256
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-21 04:45:00,043 INFO [tetris] Final score: 189
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 04:45:01,451 INFO [tetris] Final score: 175
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 04:45:02,651 INFO [tetris] Final score: 276
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-21 04:45:04,108 INFO [tetris] Final score: 203
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 04:45:05,355 INFO [tetris] Final score: 190
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 04:45:06,539 INFO [tetris] Final score: 216
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 04:45:08,011 INFO [tetris] Final score: 248
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-21 04:45:09,259 INFO [tetris] Final score: 175
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-21 04:45:09,995 INFO [tetris] Final score: 192
diff --git a/qlearning-results/a0.7-g0.9-e0.1-approximateqlearning b/qlearning-results/a0.7-g0.9-e0.1-approximateqlearning
new file mode 100644
index 0000000..ac837ca
--- /dev/null
+++ b/qlearning-results/a0.7-g0.9-e0.1-approximateqlearning
@@ -0,0 +1,30 @@
+2020-04-20 21:25:29,561 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 0.9, exploration_rate = 0.1
+203.17500000000055
+203.03650000000044
+202.6135000000002
+203.43050000000056
+202.74100000000027
+229.90399999999988
+215.91400000000124
+203.10550000000046
+203.25400000000053
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:26:06,225 INFO [tetris] Final score: 190
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:26:06,417 INFO [tetris] Final score: 202
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:26:06,610 INFO [tetris] Final score: 192
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:26:06,818 INFO [tetris] Final score: 210
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:26:07,041 INFO [tetris] Final score: 198
+Lost due to: BlockOut(Position { x: 5, y: 19 })
+2020-04-20 21:26:07,313 INFO [tetris] Final score: 204
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:26:07,490 INFO [tetris] Final score: 202
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:26:07,681 INFO [tetris] Final score: 218
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:26:07,873 INFO [tetris] Final score: 228
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-20 21:26:08,049 INFO [tetris] Final score: 210
diff --git a/qlearning-results/a0.7-g0.9-e0.1-qlearning b/qlearning-results/a0.7-g0.9-e0.1-qlearning
new file mode 100644
index 0000000..c5c61a4
--- /dev/null
+++ b/qlearning-results/a0.7-g0.9-e0.1-qlearning
@@ -0,0 +1,30 @@
+2020-04-21 03:25:00,944 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 0.9, exploration_rate = 0.1
+215.13885000000053
+211.81050000000175
+210.32375000000297
+209.97575000000114
+209.49600000000055
+209.04845000000097
+208.89100000000022
+208.95150000000052
+209.1011000000007
+Lost due to: BlockOut(Position { x: 5, y: 19 })
+2020-04-21 03:32:35,860 INFO [tetris] Final score: 182
+Lost due to: BlockOut(Position { x: 3, y: 19 })
+2020-04-21 03:32:37,332 INFO [tetris] Final score: 191
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-21 03:32:38,468 INFO [tetris] Final score: 203
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 03:32:39,492 INFO [tetris] Final score: 182
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 03:32:40,260 INFO [tetris] Final score: 245
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 03:32:40,996 INFO [tetris] Final score: 215
+Lost due to: BlockOut(Position { x: 4, y: 19 })
+2020-04-21 03:32:41,444 INFO [tetris] Final score: 168
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-21 03:32:42,276 INFO [tetris] Final score: 224
+Lost due to: BlockOut(Position { x: 4, y: 19 })
+2020-04-21 03:32:43,124 INFO [tetris] Final score: 244
+Lost due to: BlockOut(Position { x: 5, y: 20 })
+2020-04-21 03:32:44,228 INFO [tetris] Final score: 259
diff --git a/qlearning-results/a0.7-g0.9-e0.25-approximateqlearning b/qlearning-results/a0.7-g0.9-e0.25-approximateqlearning
new file mode 100644
index 0000000..5c081b4
--- /dev/null
+++ b/qlearning-results/a0.7-g0.9-e0.25-approximateqlearning
@@ -0,0 +1,30 @@
+2020-04-20 21:26:08,065 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 0.9, exploration_rate = 0.25
+199.73100000000008
+200.95750000000007
+200.71950000000038
+200.4030000000002
+204.4489999999996
+202.65200000000038
+200.87399999999997
+199.7300000000001
+205.55450000000008
+Lost due to: BlockOut(Position { x: 3, y: 20 })
+2020-04-20 21:26:43,259 INFO [tetris] Final score: 213
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:26:44,267 INFO [tetris] Final score: 235
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:26:45,963 INFO [tetris] Final score: 256
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:26:46,827 INFO [tetris] Final score: 165
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:26:48,506 INFO [tetris] Final score: 241
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:26:49,146 INFO [tetris] Final score: 207
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:26:49,706 INFO [tetris] Final score: 139
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:26:50,202 INFO [tetris] Final score: 161
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:26:52,458 INFO [tetris] Final score: 249
+Lost due to: BlockOut(Position { x: 4, y: 20 })
+2020-04-20 21:26:54,395 INFO [tetris] Final score: 308
diff --git a/qlearning-results/a0.7-g0.9-e0.25-qlearning b/qlearning-results/a0.7-g0.9-e0.25-qlearning
new file mode 100644
index 0000000..0bd9358
--- /dev/null
+++ b/qlearning-results/a0.7-g0.9-e0.25-qlearning
@@ -0,0 +1,30 @@
+2020-04-21 03:32:55,152 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 0.9, exploration_rate = 0.25
+222.45555000000095
+220.46840000000304
+219.28965000000224 +218.4454500000004 +218.04345000000154 +218.02265000000176 +217.59115000000156 +217.36425000000284 +217.25680000000168 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:40:59,401 INFO [tetris] Final score: 213 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:41:00,489 INFO [tetris] Final score: 258 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 03:41:01,289 INFO [tetris] Final score: 208 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 03:41:01,577 INFO [tetris] Final score: 183 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:41:02,057 INFO [tetris] Final score: 212 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:41:03,081 INFO [tetris] Final score: 269 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:41:03,897 INFO [tetris] Final score: 221 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:41:05,081 INFO [tetris] Final score: 196 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:41:06,553 INFO [tetris] Final score: 211 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:41:07,113 INFO [tetris] Final score: 198 diff --git a/qlearning-results/a0.7-g0.9-e0.5-approximateqlearning b/qlearning-results/a0.7-g0.9-e0.5-approximateqlearning new file mode 100644 index 0000000..9b19fff --- /dev/null +++ b/qlearning-results/a0.7-g0.9-e0.5-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 21:26:54,409 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 0.9, exploration_rate = 0.5 +198.1325000000001 +198.59149999999997 +194.69349999999994 +195.7760000000001 +196.59150000000034 +195.17499999999973 +194.40199999999982 +195.71100000000047 +198.3695000000004 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 21:27:26,306 INFO [tetris] Final score: 257 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:27:27,507 INFO [tetris] Final score: 214 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 21:27:28,002 INFO [tetris] Final score: 186 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-20 21:27:29,042 INFO [tetris] Final score: 241 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:27:29,730 INFO [tetris] Final score: 164 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:27:30,947 INFO [tetris] Final score: 226 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:27:32,402 INFO [tetris] Final score: 246 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:27:33,251 INFO [tetris] Final score: 193 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:27:34,338 INFO [tetris] Final score: 204 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 21:27:35,634 INFO [tetris] Final score: 228 diff --git a/qlearning-results/a0.7-g0.9-e0.5-qlearning b/qlearning-results/a0.7-g0.9-e0.5-qlearning new file mode 100644 index 0000000..6a01ba2 --- /dev/null +++ b/qlearning-results/a0.7-g0.9-e0.5-qlearning @@ -0,0 +1,30 @@ +2020-04-21 03:41:19,797 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 0.9, exploration_rate = 0.5 +223.51380000000077 +222.1278000000015 +221.6259000000018 +220.6082500000004 +220.54835000000202 +220.51770000000144 +220.31765000000223 +220.23355000000177 +219.9084500000021 +Lost due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-21 03:49:11,930 INFO [tetris] Final score: 214 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:49:13,243 INFO 
[tetris] Final score: 255 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 03:49:13,754 INFO [tetris] Final score: 217 +Lost due to: LockOut +2020-04-21 03:49:14,779 INFO [tetris] Final score: 235 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-21 03:49:17,387 INFO [tetris] Final score: 302 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:49:18,315 INFO [tetris] Final score: 170 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:49:19,483 INFO [tetris] Final score: 206 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-21 03:49:20,427 INFO [tetris] Final score: 233 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:49:21,643 INFO [tetris] Final score: 248 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:49:23,514 INFO [tetris] Final score: 342 diff --git a/qlearning-results/a0.7-g0.9-e0.75-approximateqlearning b/qlearning-results/a0.7-g0.9-e0.75-approximateqlearning new file mode 100644 index 0000000..d08616c --- /dev/null +++ b/qlearning-results/a0.7-g0.9-e0.75-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 21:27:35,647 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 0.9, exploration_rate = 0.75 +200.55050000000023 +199.57249999999993 +200.44399999999987 +199.94550000000052 +199.73450000000054 +199.9685000000002 +198.38999999999993 +197.7865000000001 +199.29950000000014 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:28:08,407 INFO [tetris] Final score: 239 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:28:09,319 INFO [tetris] Final score: 198 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:28:09,831 INFO [tetris] Final score: 207 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:28:10,823 INFO [tetris] Final score: 190 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:28:11,415 INFO [tetris] Final score: 182 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 21:28:12,423 INFO [tetris] Final score: 201 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:28:12,983 INFO [tetris] Final score: 178 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 21:28:14,231 INFO [tetris] Final score: 281 +Lost due to: LockOut +2020-04-20 21:28:14,919 INFO [tetris] Final score: 164 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 21:28:15,607 INFO [tetris] Final score: 220 diff --git a/qlearning-results/a0.7-g0.9-e0.75-qlearning b/qlearning-results/a0.7-g0.9-e0.75-qlearning new file mode 100644 index 0000000..91f6304 --- /dev/null +++ b/qlearning-results/a0.7-g0.9-e0.75-qlearning @@ -0,0 +1,30 @@ +2020-04-21 03:49:37,711 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 0.9, exploration_rate = 0.75 +224.23100000000096 +223.19695000000027 +222.8860500000018 +222.29845000000086 +222.98580000000118 +222.6816999999997 +222.5675000000006 +222.46140000000213 +222.3639000000002 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 03:57:09,901 INFO [tetris] Final score: 210 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-21 03:57:11,134 INFO [tetris] Final score: 196 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 03:57:13,581 INFO [tetris] Final score: 235 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:57:15,150 INFO [tetris] Final score: 180 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:57:15,934 INFO [tetris] Final score: 186 +Lost due to: 
BlockOut(Position { x: 3, y: 20 }) +2020-04-21 03:57:16,670 INFO [tetris] Final score: 212 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:57:17,518 INFO [tetris] Final score: 203 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:57:19,214 INFO [tetris] Final score: 204 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:57:20,333 INFO [tetris] Final score: 178 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:57:21,966 INFO [tetris] Final score: 297 diff --git a/qlearning-results/a0.7-g0.9-e0.9-approximateqlearning b/qlearning-results/a0.7-g0.9-e0.9-approximateqlearning new file mode 100644 index 0000000..f5d4308 --- /dev/null +++ b/qlearning-results/a0.7-g0.9-e0.9-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 21:28:15,621 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 0.9, exploration_rate = 0.9 +208.77800000000016 +208.0775000000002 +208.76200000000014 +207.2684999999999 +207.94550000000044 +207.4880000000006 +207.568 +208.248 +207.29050000000007 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:28:53,193 INFO [tetris] Final score: 219 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:28:53,944 INFO [tetris] Final score: 192 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:28:54,441 INFO [tetris] Final score: 176 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:28:55,369 INFO [tetris] Final score: 262 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:28:56,953 INFO [tetris] Final score: 257 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:28:57,785 INFO [tetris] Final score: 217 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 21:28:58,809 INFO [tetris] Final score: 176 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 21:28:59,656 INFO [tetris] Final score: 231 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:29:00,521 INFO [tetris] Final score: 159 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:29:01,096 INFO [tetris] Final score: 176 diff --git a/qlearning-results/a0.7-g0.9-e0.9-qlearning b/qlearning-results/a0.7-g0.9-e0.9-qlearning new file mode 100644 index 0000000..e116a47 --- /dev/null +++ b/qlearning-results/a0.7-g0.9-e0.9-qlearning @@ -0,0 +1,30 @@ +2020-04-21 03:57:38,040 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 0.9, exploration_rate = 0.9 +223.2869000000006 +223.9131500000013 +224.14165000000034 +223.62774999999982 +223.37350000000228 +223.36860000000013 +223.41860000000142 +222.8307000000011 +223.13380000000205 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 04:04:48,619 INFO [tetris] Final score: 273 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 04:04:49,771 INFO [tetris] Final score: 277 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 04:04:51,292 INFO [tetris] Final score: 284 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 04:04:52,475 INFO [tetris] Final score: 167 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 04:04:53,307 INFO [tetris] Final score: 180 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 04:04:54,155 INFO [tetris] Final score: 209 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 04:04:55,484 INFO [tetris] Final score: 181 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 04:04:57,211 INFO [tetris] Final score: 251 +Lost due to: BlockOut(Position { x: 4, y: 20 
}) +2020-04-21 04:04:57,739 INFO [tetris] Final score: 195 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 04:04:58,891 INFO [tetris] Final score: 205 diff --git a/qlearning-results/a0.7-g1.0-e0.1-approximateqlearning b/qlearning-results/a0.7-g1.0-e0.1-approximateqlearning new file mode 100644 index 0000000..86abdd4 --- /dev/null +++ b/qlearning-results/a0.7-g1.0-e0.1-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 20:45:37,511 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 1, exploration_rate = 0.1 +335.5434999999998 +373.49600000000015 +371.8569999999996 +372.2030000000001 +370.8865000000002 +372.12649999999996 +373.30850000000004 +373.637 +371.95349999999905 +Lost due to: LockOut +2020-04-20 21:07:59,961 INFO [tetris] Final score: 291 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 21:08:21,706 INFO [tetris] Final score: 346 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 21:08:40,282 INFO [tetris] Final score: 361 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 21:09:17,754 INFO [tetris] Final score: 408 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:09:42,217 INFO [tetris] Final score: 335 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 21:10:00,697 INFO [tetris] Final score: 306 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:10:30,041 INFO [tetris] Final score: 367 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:11:12,265 INFO [tetris] Final score: 491 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 21:11:33,082 INFO [tetris] Final score: 359 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:11:58,473 INFO [tetris] Final score: 314 diff --git a/qlearning-results/a0.7-g1.0-e0.1-qlearning b/qlearning-results/a0.7-g1.0-e0.1-qlearning new file mode 100644 index 0000000..7157b34 --- /dev/null +++ b/qlearning-results/a0.7-g1.0-e0.1-qlearning @@ -0,0 +1,30 @@ +2020-04-21 02:29:23,512 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 1, exploration_rate = 0.1 +218.13240000000118 +215.3771500000018 +215.7146000000007 +216.35280000000142 +215.86075000000187 +217.47790000000032 +215.4513000000002 +215.5094500000005 +215.9326499999996 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:44:48,480 INFO [tetris] Final score: 263 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:44:51,344 INFO [tetris] Final score: 242 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:44:56,448 INFO [tetris] Final score: 387 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-21 02:44:58,032 INFO [tetris] Final score: 229 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:44:59,936 INFO [tetris] Final score: 337 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:45:02,688 INFO [tetris] Final score: 200 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-21 02:45:04,705 INFO [tetris] Final score: 213 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-21 02:45:07,056 INFO [tetris] Final score: 290 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:45:09,777 INFO [tetris] Final score: 192 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 02:45:11,201 INFO [tetris] Final score: 178 diff --git a/qlearning-results/a0.7-g1.0-e0.25-approximateqlearning b/qlearning-results/a0.7-g1.0-e0.25-approximateqlearning new file mode 100644 index 0000000..b7c61d1 --- /dev/null +++ 
b/qlearning-results/a0.7-g1.0-e0.25-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 21:11:58,489 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 1, exploration_rate = 0.25 +274.52300000000014 +298.8889999999992 +298.89950000000033 +297.5165000000004 +295.3755 +296.84199999999936 +296.1189999999999 +299.6350000000002 +297.683 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 21:18:20,940 INFO [tetris] Final score: 276 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 21:18:31,212 INFO [tetris] Final score: 348 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 21:18:40,108 INFO [tetris] Final score: 314 +Lost due to: LockOut +2020-04-20 21:18:46,444 INFO [tetris] Final score: 305 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 21:18:57,964 INFO [tetris] Final score: 382 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:19:06,524 INFO [tetris] Final score: 281 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:19:11,356 INFO [tetris] Final score: 309 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:19:17,707 INFO [tetris] Final score: 321 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:19:23,452 INFO [tetris] Final score: 257 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:19:35,068 INFO [tetris] Final score: 338 diff --git a/qlearning-results/a0.7-g1.0-e0.25-qlearning b/qlearning-results/a0.7-g1.0-e0.25-qlearning new file mode 100644 index 0000000..51ac102 --- /dev/null +++ b/qlearning-results/a0.7-g1.0-e0.25-qlearning @@ -0,0 +1,30 @@ +2020-04-21 02:45:26,480 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 1, exploration_rate = 0.25 +219.72690000000298 +218.82865000000038 +219.32520000000136 +219.66200000000066 +219.37170000000083 +218.15705000000122 +218.2327500000009 +219.18330000000105 +217.24985000000268 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 02:57:34,042 INFO [tetris] Final score: 175 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 02:57:36,201 INFO [tetris] Final score: 297 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:57:37,322 INFO [tetris] Final score: 194 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:57:38,362 INFO [tetris] Final score: 205 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:57:40,233 INFO [tetris] Final score: 227 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:57:42,618 INFO [tetris] Final score: 210 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-21 02:57:43,834 INFO [tetris] Final score: 179 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:57:45,242 INFO [tetris] Final score: 248 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:57:46,650 INFO [tetris] Final score: 146 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-21 02:57:48,410 INFO [tetris] Final score: 231 diff --git a/qlearning-results/a0.7-g1.0-e0.5-approximateqlearning b/qlearning-results/a0.7-g1.0-e0.5-approximateqlearning new file mode 100644 index 0000000..2c835ac --- /dev/null +++ b/qlearning-results/a0.7-g1.0-e0.5-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 21:19:35,081 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 1, exploration_rate = 0.5 +247.90150000000008 +257.94699999999995 +258.18599999999975 +258.4640000000003 +256.9974999999999 +256.7455 +257.53100000000006 +257.3304999999997 
+257.3244999999996 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-20 21:22:03,559 INFO [tetris] Final score: 240 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:22:08,008 INFO [tetris] Final score: 244 +Lost due to: LockOut +2020-04-20 21:22:09,656 INFO [tetris] Final score: 225 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:22:13,607 INFO [tetris] Final score: 276 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 21:22:16,183 INFO [tetris] Final score: 221 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:22:17,880 INFO [tetris] Final score: 230 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:22:20,407 INFO [tetris] Final score: 299 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:22:24,375 INFO [tetris] Final score: 209 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:22:29,303 INFO [tetris] Final score: 234 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:22:32,775 INFO [tetris] Final score: 237 diff --git a/qlearning-results/a0.7-g1.0-e0.5-qlearning b/qlearning-results/a0.7-g1.0-e0.5-qlearning new file mode 100644 index 0000000..185507f --- /dev/null +++ b/qlearning-results/a0.7-g1.0-e0.5-qlearning @@ -0,0 +1,30 @@ +2020-04-21 02:58:04,445 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 1, exploration_rate = 0.5 +221.81855000000067 +221.78095000000235 +220.20000000000195 +220.13229999999908 +220.20355000000052 +222.93500000000165 +219.11325000000022 +219.53685000000127 +218.94330000000096 +Lost due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-21 03:07:51,774 INFO [tetris] Final score: 201 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:07:53,614 INFO [tetris] Final score: 259 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:07:55,374 INFO [tetris] Final score: 211 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:07:57,278 INFO [tetris] Final score: 240 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:07:58,814 INFO [tetris] Final score: 251 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:08:01,326 INFO [tetris] Final score: 208 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-21 03:08:02,750 INFO [tetris] Final score: 282 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-21 03:08:04,158 INFO [tetris] Final score: 202 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:08:05,854 INFO [tetris] Final score: 213 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:08:07,054 INFO [tetris] Final score: 257 diff --git a/qlearning-results/a0.7-g1.0-e0.75-approximateqlearning b/qlearning-results/a0.7-g1.0-e0.75-approximateqlearning new file mode 100644 index 0000000..8bc6929 --- /dev/null +++ b/qlearning-results/a0.7-g1.0-e0.75-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 21:22:32,784 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 1, exploration_rate = 0.75 +234.73200000000008 +235.39850000000052 +237.45550000000017 +236.32299999999995 +234.66250000000005 +236.61350000000024 +237.03899999999976 +237.18350000000044 +235.64149999999987 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:23:55,839 INFO [tetris] Final score: 230 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 21:23:57,504 INFO [tetris] Final score: 243 +Lost due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-20 21:23:59,280 INFO [tetris] Final score: 255 +Lost due 
to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:24:01,055 INFO [tetris] Final score: 233 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:24:03,039 INFO [tetris] Final score: 248 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 21:24:04,495 INFO [tetris] Final score: 176 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 21:24:06,447 INFO [tetris] Final score: 269 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 21:24:07,615 INFO [tetris] Final score: 156 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:24:10,431 INFO [tetris] Final score: 311 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:24:11,967 INFO [tetris] Final score: 198 diff --git a/qlearning-results/a0.7-g1.0-e0.75-qlearning b/qlearning-results/a0.7-g1.0-e0.75-qlearning new file mode 100644 index 0000000..0b4734d --- /dev/null +++ b/qlearning-results/a0.7-g1.0-e0.75-qlearning @@ -0,0 +1,30 @@ +2020-04-21 03:08:23,678 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 1, exploration_rate = 0.75 +223.0385500000023 +222.11090000000146 +221.80540000000062 +221.5786500000007 +221.59845000000067 +221.54075000000057 +221.12695000000113 +221.47705000000155 +221.25295000000023 +Lost due to: LockOut +2020-04-21 03:16:35,363 INFO [tetris] Final score: 216 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:16:37,348 INFO [tetris] Final score: 248 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:16:39,155 INFO [tetris] Final score: 257 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:16:42,483 INFO [tetris] Final score: 310 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:16:43,075 INFO [tetris] Final score: 143 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:16:45,043 INFO [tetris] Final score: 243 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:16:46,131 INFO [tetris] Final score: 220 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-21 03:16:47,939 INFO [tetris] Final score: 254 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:16:49,827 INFO [tetris] Final score: 286 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:16:51,635 INFO [tetris] Final score: 253 diff --git a/qlearning-results/a0.7-g1.0-e0.9-approximateqlearning b/qlearning-results/a0.7-g1.0-e0.9-approximateqlearning new file mode 100644 index 0000000..d57245b --- /dev/null +++ b/qlearning-results/a0.7-g1.0-e0.9-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 21:24:11,981 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 1, exploration_rate = 0.9 +227.1814999999998 +227.54699999999985 +227.98299999999986 +228.55650000000023 +227.5055000000001 +227.2115000000003 +226.91000000000008 +229.05350000000018 +228.59800000000052 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 21:25:16,620 INFO [tetris] Final score: 239 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 21:25:17,452 INFO [tetris] Final score: 186 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:25:19,388 INFO [tetris] Final score: 245 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 21:25:20,940 INFO [tetris] Final score: 211 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 21:25:22,220 INFO [tetris] Final score: 189 +Lost due to: LockOut +2020-04-20 21:25:23,996 INFO [tetris] Final score: 242 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:25:25,101 
INFO [tetris] Final score: 157 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-20 21:25:25,932 INFO [tetris] Final score: 182 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:25:27,452 INFO [tetris] Final score: 216 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 21:25:29,548 INFO [tetris] Final score: 220 diff --git a/qlearning-results/a0.7-g1.0-e0.9-qlearning b/qlearning-results/a0.7-g1.0-e0.9-qlearning new file mode 100644 index 0000000..85661d8 --- /dev/null +++ b/qlearning-results/a0.7-g1.0-e0.9-qlearning @@ -0,0 +1,30 @@ +2020-04-21 03:17:08,898 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 1, exploration_rate = 0.9 +223.7244000000009 +223.32210000000117 +223.4059000000003 +223.3708500000003 +223.1680000000011 +223.35150000000203 +223.01960000000196 +223.27850000000092 +223.0071500000017 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:24:30,404 INFO [tetris] Final score: 195 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:24:31,636 INFO [tetris] Final score: 161 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:24:33,380 INFO [tetris] Final score: 228 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:24:34,500 INFO [tetris] Final score: 162 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:24:36,372 INFO [tetris] Final score: 249 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-21 03:24:37,924 INFO [tetris] Final score: 222 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:24:39,044 INFO [tetris] Final score: 219 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-21 03:24:40,148 INFO [tetris] Final score: 176 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:24:41,219 INFO [tetris] Final score: 183 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-21 03:24:43,411 INFO [tetris] Final score: 205 diff --git a/qlearning-results/a0.9-g0.1-e0.1-approximateqlearning b/qlearning-results/a0.9-g0.1-e0.1-approximateqlearning new file mode 100644 index 0000000..e7c4005 --- /dev/null +++ b/qlearning-results/a0.9-g0.1-e0.1-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 22:21:38,649 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 0.1, exploration_rate = 0.1 +204.72000000000017 +204.9555000000002 +204.73150000000055 +204.15749999999997 +203.75300000000044 +203.68850000000003 +204.8665 +205.62800000000075 +204.5355000000006 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 22:22:11,912 INFO [tetris] Final score: 168 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:22:12,296 INFO [tetris] Final score: 219 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:22:12,472 INFO [tetris] Final score: 202 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:22:12,680 INFO [tetris] Final score: 174 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:22:12,920 INFO [tetris] Final score: 160 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:22:13,176 INFO [tetris] Final score: 207 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 22:22:13,337 INFO [tetris] Final score: 160 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:22:14,088 INFO [tetris] Final score: 213 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:22:14,360 INFO [tetris] Final score: 200 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 22:22:14,553 INFO [tetris] Final 
score: 184 diff --git a/qlearning-results/a0.9-g0.1-e0.1-qlearning b/qlearning-results/a0.9-g0.1-e0.1-qlearning new file mode 100644 index 0000000..d59d1e4 --- /dev/null +++ b/qlearning-results/a0.9-g0.1-e0.1-qlearning @@ -0,0 +1,30 @@ +2020-04-21 07:44:28,713 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 0.1, exploration_rate = 0.1 +220.79705000000098 +217.88625000000195 +216.9815500000009 +216.4122000000027 +216.27310000000222 +216.22095000000283 +215.41645000000128 +215.9998000000015 +215.62740000000082 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:52:07,247 INFO [tetris] Final score: 217 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:52:07,982 INFO [tetris] Final score: 240 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:52:08,703 INFO [tetris] Final score: 173 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:52:09,471 INFO [tetris] Final score: 207 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-21 07:52:10,223 INFO [tetris] Final score: 211 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:52:11,039 INFO [tetris] Final score: 240 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-21 07:52:11,583 INFO [tetris] Final score: 203 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:52:12,638 INFO [tetris] Final score: 251 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 07:52:13,262 INFO [tetris] Final score: 220 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:52:13,727 INFO [tetris] Final score: 198 diff --git a/qlearning-results/a0.9-g0.1-e0.25-approximateqlearning b/qlearning-results/a0.9-g0.1-e0.25-approximateqlearning new file mode 100644 index 0000000..00e88ca --- /dev/null +++ b/qlearning-results/a0.9-g0.1-e0.25-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 22:22:14,565 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 0.1, exploration_rate = 0.25 +198.19049999999933 +195.8015000000002 +195.93800000000024 +196.81100000000052 +196.83000000000018 +196.78100000000015 +197.0014999999999 +196.39300000000034 +196.53350000000043 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 22:22:42,136 INFO [tetris] Final score: 164 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:22:42,456 INFO [tetris] Final score: 215 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-20 22:22:42,712 INFO [tetris] Final score: 206 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:22:43,000 INFO [tetris] Final score: 186 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:22:43,256 INFO [tetris] Final score: 175 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:22:43,656 INFO [tetris] Final score: 177 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:22:43,864 INFO [tetris] Final score: 173 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 22:22:44,169 INFO [tetris] Final score: 167 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:22:44,584 INFO [tetris] Final score: 209 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:22:44,840 INFO [tetris] Final score: 146 diff --git a/qlearning-results/a0.9-g0.1-e0.25-qlearning b/qlearning-results/a0.9-g0.1-e0.25-qlearning new file mode 100644 index 0000000..3df0b91 --- /dev/null +++ b/qlearning-results/a0.9-g0.1-e0.25-qlearning @@ -0,0 +1,30 @@ +2020-04-21 07:52:25,240 INFO [tetris::actors::qlearning] Training an actor with 
learning_rate = 0.9, discount_rate = 0.1, exploration_rate = 0.25 +221.69755000000154 +219.747200000001 +218.60875000000107 +217.57425000000174 +218.1184000000003 +217.3527500000013 +216.99265000000088 +217.02095000000267 +216.84095000000266 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 08:00:38,274 INFO [tetris] Final score: 175 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 08:00:39,074 INFO [tetris] Final score: 211 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 08:00:40,306 INFO [tetris] Final score: 253 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 08:00:40,818 INFO [tetris] Final score: 147 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 08:00:41,890 INFO [tetris] Final score: 278 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 08:00:42,530 INFO [tetris] Final score: 211 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 08:00:44,018 INFO [tetris] Final score: 222 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 08:00:44,658 INFO [tetris] Final score: 192 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-21 08:00:46,370 INFO [tetris] Final score: 196 +Lost due to: LockOut +2020-04-21 08:00:46,770 INFO [tetris] Final score: 182 diff --git a/qlearning-results/a0.9-g0.1-e0.5-approximateqlearning b/qlearning-results/a0.9-g0.1-e0.5-approximateqlearning new file mode 100644 index 0000000..b9e13b4 --- /dev/null +++ b/qlearning-results/a0.9-g0.1-e0.5-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 22:22:44,846 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 0.1, exploration_rate = 0.5 +194.55000000000032 +192.75949999999997 +193.3220000000002 +193.76050000000026 +193.4004999999997 +194.7199999999996 +193.88100000000017 +192.56600000000014 +193.9615 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:23:12,757 INFO [tetris] Final score: 175 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:23:13,301 INFO [tetris] Final score: 237 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 22:23:13,589 INFO [tetris] Final score: 152 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:23:14,053 INFO [tetris] Final score: 164 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:23:14,724 INFO [tetris] Final score: 198 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:23:15,140 INFO [tetris] Final score: 187 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:23:15,428 INFO [tetris] Final score: 137 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:23:15,764 INFO [tetris] Final score: 175 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 22:23:16,149 INFO [tetris] Final score: 147 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:23:16,405 INFO [tetris] Final score: 164 diff --git a/qlearning-results/a0.9-g0.1-e0.5-qlearning b/qlearning-results/a0.9-g0.1-e0.5-qlearning new file mode 100644 index 0000000..b1991ac --- /dev/null +++ b/qlearning-results/a0.9-g0.1-e0.5-qlearning @@ -0,0 +1,30 @@ +2020-04-21 08:01:00,127 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 0.1, exploration_rate = 0.5 +222.4513500000001 +220.3924500000006 +219.65910000000176 +219.38385000000184 +218.56560000000067 +218.46385000000154 +218.31455000000076 +217.92555000000132 +217.9842500000028 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-21 08:08:57,454 INFO [tetris] Final score: 259 +Lost due to: 
BlockOut(Position { x: 4, y: 20 }) +2020-04-21 08:08:58,830 INFO [tetris] Final score: 188 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 08:09:00,238 INFO [tetris] Final score: 257 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 08:09:01,742 INFO [tetris] Final score: 220 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 08:09:02,718 INFO [tetris] Final score: 207 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 08:09:03,566 INFO [tetris] Final score: 166 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 08:09:04,942 INFO [tetris] Final score: 225 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 08:09:06,430 INFO [tetris] Final score: 163 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 08:09:07,022 INFO [tetris] Final score: 176 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 08:09:08,286 INFO [tetris] Final score: 250 diff --git a/qlearning-results/a0.9-g0.1-e0.75-approximateqlearning b/qlearning-results/a0.9-g0.1-e0.75-approximateqlearning new file mode 100644 index 0000000..fc183aa --- /dev/null +++ b/qlearning-results/a0.9-g0.1-e0.75-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 22:23:16,415 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 0.1, exploration_rate = 0.75 +199.29450000000026 +199.06850000000006 +199.58450000000042 +199.93000000000043 +199.13649999999953 +199.01300000000015 +198.7224999999995 +199.10850000000022 +198.56250000000034 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 22:23:49,990 INFO [tetris] Final score: 198 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 22:23:50,951 INFO [tetris] Final score: 210 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 22:23:51,670 INFO [tetris] Final score: 216 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:23:52,583 INFO [tetris] Final score: 244 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:23:53,446 INFO [tetris] Final score: 249 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:23:53,878 INFO [tetris] Final score: 198 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 22:23:54,726 INFO [tetris] Final score: 178 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:23:55,462 INFO [tetris] Final score: 213 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:23:56,519 INFO [tetris] Final score: 246 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:23:57,607 INFO [tetris] Final score: 235 diff --git a/qlearning-results/a0.9-g0.1-e0.75-qlearning b/qlearning-results/a0.9-g0.1-e0.75-qlearning new file mode 100644 index 0000000..bf2d936 --- /dev/null +++ b/qlearning-results/a0.9-g0.1-e0.75-qlearning @@ -0,0 +1,30 @@ +2020-04-21 08:09:23,342 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 0.1, exploration_rate = 0.75 +222.67870000000124 +221.47485000000142 +220.88229999999945 +220.53305000000125 +220.33230000000083 +220.3560000000007 +219.8657500000003 +219.88295000000113 +219.44385000000062 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 08:16:45,716 INFO [tetris] Final score: 174 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 08:16:47,684 INFO [tetris] Final score: 298 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 08:16:48,835 INFO [tetris] Final score: 188 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 08:16:49,972 INFO [tetris] Final score: 186 +Lost due to: 
BlockOut(Position { x: 5, y: 19 }) +2020-04-21 08:16:51,204 INFO [tetris] Final score: 202 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 08:16:51,812 INFO [tetris] Final score: 178 +Lost due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-21 08:16:52,852 INFO [tetris] Final score: 193 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 08:16:53,972 INFO [tetris] Final score: 194 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 08:16:55,396 INFO [tetris] Final score: 214 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 08:16:56,452 INFO [tetris] Final score: 207 diff --git a/qlearning-results/a0.9-g0.1-e0.9-approximateqlearning b/qlearning-results/a0.9-g0.1-e0.9-approximateqlearning new file mode 100644 index 0000000..dfa4a58 --- /dev/null +++ b/qlearning-results/a0.9-g0.1-e0.9-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 22:23:57,621 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 0.1, exploration_rate = 0.9 +207.0640000000004 +210.66100000000017 +208.24550000000045 +208.43600000000004 +208.50150000000025 +209.06300000000041 +209.48950000000042 +208.91000000000028 +209.3545000000003 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:24:39,721 INFO [tetris] Final score: 204 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:24:41,017 INFO [tetris] Final score: 278 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-20 22:24:42,696 INFO [tetris] Final score: 252 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:24:43,913 INFO [tetris] Final score: 235 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 22:24:44,633 INFO [tetris] Final score: 154 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:24:45,865 INFO [tetris] Final score: 226 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 22:24:46,681 INFO [tetris] Final score: 146 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:24:47,497 INFO [tetris] Final score: 194 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:24:48,824 INFO [tetris] Final score: 258 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 22:24:49,624 INFO [tetris] Final score: 199 diff --git a/qlearning-results/a0.9-g0.1-e0.9-qlearning b/qlearning-results/a0.9-g0.1-e0.9-qlearning new file mode 100644 index 0000000..fa68735 --- /dev/null +++ b/qlearning-results/a0.9-g0.1-e0.9-qlearning @@ -0,0 +1,30 @@ +2020-04-21 08:17:12,786 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 0.1, exploration_rate = 0.9 +223.35630000000114 +222.6021500000007 +222.02245000000002 +222.3257000000021 +222.35770000000105 +221.9555500000002 +222.12560000000096 +221.61450000000048 +221.706050000001 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 08:24:12,613 INFO [tetris] Final score: 204 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 08:24:13,605 INFO [tetris] Final score: 194 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 08:24:15,238 INFO [tetris] Final score: 195 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 08:24:16,454 INFO [tetris] Final score: 181 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 08:24:18,038 INFO [tetris] Final score: 236 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 08:24:18,981 INFO [tetris] Final score: 208 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 08:24:20,533 INFO [tetris] Final score: 286 +Lost due to: 
BlockOut(Position { x: 3, y: 20 }) +2020-04-21 08:24:22,229 INFO [tetris] Final score: 167 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 08:24:23,558 INFO [tetris] Final score: 212 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 08:24:24,422 INFO [tetris] Final score: 217 diff --git a/qlearning-results/a0.9-g0.5-e0.1-approximateqlearning b/qlearning-results/a0.9-g0.5-e0.1-approximateqlearning new file mode 100644 index 0000000..b5a31c7 --- /dev/null +++ b/qlearning-results/a0.9-g0.5-e0.1-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 22:18:16,715 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 0.5, exploration_rate = 0.1 +204.24050000000017 +203.9764999999998 +203.46550000000076 +204.50350000000043 +203.4560000000008 +203.43850000000006 +205.10550000000035 +204.1230000000002 +204.7925000000002 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:18:46,477 INFO [tetris] Final score: 241 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:18:46,974 INFO [tetris] Final score: 206 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:18:48,894 INFO [tetris] Final score: 196 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:18:50,846 INFO [tetris] Final score: 278 +Lost due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-20 22:18:55,326 INFO [tetris] Final score: 253 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:18:58,749 INFO [tetris] Final score: 318 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 22:18:59,934 INFO [tetris] Final score: 224 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:19:00,478 INFO [tetris] Final score: 167 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 22:19:00,701 INFO [tetris] Final score: 216 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 22:19:02,878 INFO [tetris] Final score: 196 diff --git a/qlearning-results/a0.9-g0.5-e0.1-qlearning b/qlearning-results/a0.9-g0.5-e0.1-qlearning new file mode 100644 index 0000000..ae660c6 --- /dev/null +++ b/qlearning-results/a0.9-g0.5-e0.1-qlearning @@ -0,0 +1,30 @@ +2020-04-21 07:04:14,485 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 0.5, exploration_rate = 0.1 +220.1491000000026 +218.15004999999934 +217.1538500000025 +216.49000000000188 +216.699900000002 +216.08380000000165 +216.4129500000018 +215.53930000000219 +215.56590000000202 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:11:57,769 INFO [tetris] Final score: 214 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 07:11:58,906 INFO [tetris] Final score: 228 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-21 07:11:59,450 INFO [tetris] Final score: 197 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:12:00,522 INFO [tetris] Final score: 240 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:12:02,074 INFO [tetris] Final score: 247 +Lost due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-21 07:12:03,353 INFO [tetris] Final score: 245 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:12:04,265 INFO [tetris] Final score: 224 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:12:04,906 INFO [tetris] Final score: 192 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-21 07:12:05,498 INFO [tetris] Final score: 192 +Lost due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-21 07:12:06,282 INFO [tetris] Final score: 223 diff --git 
a/qlearning-results/a0.9-g0.5-e0.25-approximateqlearning b/qlearning-results/a0.9-g0.5-e0.25-approximateqlearning new file mode 100644 index 0000000..4a0e6d2 --- /dev/null +++ b/qlearning-results/a0.9-g0.5-e0.25-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 22:19:02,890 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 0.5, exploration_rate = 0.25 +197.99099999999996 +197.66300000000044 +197.51550000000015 +197.4325000000003 +197.43349999999995 +196.97400000000013 +197.6255000000005 +197.77600000000027 +196.93750000000003 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:19:28,469 INFO [tetris] Final score: 207 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:19:28,757 INFO [tetris] Final score: 172 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:19:29,286 INFO [tetris] Final score: 211 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:19:29,750 INFO [tetris] Final score: 168 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:19:31,685 INFO [tetris] Final score: 291 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 22:19:32,134 INFO [tetris] Final score: 177 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 22:19:32,517 INFO [tetris] Final score: 185 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 22:19:32,869 INFO [tetris] Final score: 145 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:19:34,134 INFO [tetris] Final score: 203 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:19:34,485 INFO [tetris] Final score: 213 diff --git a/qlearning-results/a0.9-g0.5-e0.25-qlearning b/qlearning-results/a0.9-g0.5-e0.25-qlearning new file mode 100644 index 0000000..6492835 --- /dev/null +++ b/qlearning-results/a0.9-g0.5-e0.25-qlearning @@ -0,0 +1,30 @@ +2020-04-21 07:12:17,795 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 0.5, exploration_rate = 0.25 +221.4958000000015 +219.4804000000017 +218.46895000000163 +218.31290000000104 +217.58035000000186 +217.51680000000124 +216.92265000000071 +216.742700000001 +216.57269999999994 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:20:28,839 INFO [tetris] Final score: 185 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:20:29,816 INFO [tetris] Final score: 201 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:20:30,199 INFO [tetris] Final score: 220 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:20:31,400 INFO [tetris] Final score: 244 +Lost due to: LockOut +2020-04-21 07:20:32,247 INFO [tetris] Final score: 237 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:20:33,191 INFO [tetris] Final score: 229 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-21 07:20:33,687 INFO [tetris] Final score: 204 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:20:34,375 INFO [tetris] Final score: 221 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:20:35,735 INFO [tetris] Final score: 247 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:20:36,631 INFO [tetris] Final score: 228 diff --git a/qlearning-results/a0.9-g0.5-e0.5-approximateqlearning b/qlearning-results/a0.9-g0.5-e0.5-approximateqlearning new file mode 100644 index 0000000..b615034 --- /dev/null +++ b/qlearning-results/a0.9-g0.5-e0.5-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 22:19:34,500 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 
0.9, discount_rate = 0.5, exploration_rate = 0.5 +194.35150000000036 +193.85400000000044 +192.59300000000016 +192.44249999999963 +192.79150000000027 +194.87899999999942 +193.373 +193.26750000000007 +193.31949999999955 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:20:01,078 INFO [tetris] Final score: 173 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:20:01,798 INFO [tetris] Final score: 204 +Lost due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-20 22:20:02,694 INFO [tetris] Final score: 204 +Lost due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-20 22:20:03,109 INFO [tetris] Final score: 191 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:20:03,590 INFO [tetris] Final score: 184 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 22:20:04,150 INFO [tetris] Final score: 170 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:20:05,158 INFO [tetris] Final score: 173 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:20:06,086 INFO [tetris] Final score: 211 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:20:06,502 INFO [tetris] Final score: 172 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:20:07,046 INFO [tetris] Final score: 210 diff --git a/qlearning-results/a0.9-g0.5-e0.5-qlearning b/qlearning-results/a0.9-g0.5-e0.5-qlearning new file mode 100644 index 0000000..d85a54b --- /dev/null +++ b/qlearning-results/a0.9-g0.5-e0.5-qlearning @@ -0,0 +1,30 @@ +2020-04-21 07:20:49,939 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 0.5, exploration_rate = 0.5 +222.80135000000013 +220.52675000000102 +219.24725000000086 +218.94735000000048 +218.56145000000072 +219.08565000000195 +218.17790000000142 +217.92955000000103 +218.26210000000088 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:28:45,671 INFO [tetris] Final score: 196 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:28:46,023 INFO [tetris] Final score: 139 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:28:47,223 INFO [tetris] Final score: 223 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-21 07:28:47,959 INFO [tetris] Final score: 205 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:28:49,255 INFO [tetris] Final score: 264 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:28:50,039 INFO [tetris] Final score: 214 +Lost due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-21 07:28:51,479 INFO [tetris] Final score: 178 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 07:28:52,071 INFO [tetris] Final score: 209 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 07:28:52,871 INFO [tetris] Final score: 197 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:28:54,871 INFO [tetris] Final score: 250 diff --git a/qlearning-results/a0.9-g0.5-e0.75-approximateqlearning b/qlearning-results/a0.9-g0.5-e0.75-approximateqlearning new file mode 100644 index 0000000..4c8c0b0 --- /dev/null +++ b/qlearning-results/a0.9-g0.5-e0.75-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 22:20:07,059 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 0.5, exploration_rate = 0.75 +198.4479999999999 +199.46200000000002 +199.39050000000043 +199.45900000000015 +198.11249999999995 +198.2095000000001 +198.34499999999963 +198.5230000000004 +198.27000000000044 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 22:20:39,527 INFO [tetris] Final score: 
209 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:20:40,328 INFO [tetris] Final score: 239 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:20:41,143 INFO [tetris] Final score: 176 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:20:41,703 INFO [tetris] Final score: 147 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:20:42,791 INFO [tetris] Final score: 136 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:20:43,895 INFO [tetris] Final score: 217 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:20:45,143 INFO [tetris] Final score: 247 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:20:45,511 INFO [tetris] Final score: 191 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 22:20:46,455 INFO [tetris] Final score: 226 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:20:47,831 INFO [tetris] Final score: 228 diff --git a/qlearning-results/a0.9-g0.5-e0.75-qlearning b/qlearning-results/a0.9-g0.5-e0.75-qlearning new file mode 100644 index 0000000..0e9295a --- /dev/null +++ b/qlearning-results/a0.9-g0.5-e0.75-qlearning @@ -0,0 +1,30 @@ +2020-04-21 07:29:09,845 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 0.5, exploration_rate = 0.75 +222.3914500000019 +221.23785000000152 +221.2070000000005 +220.99320000000148 +220.4524500000023 +220.212800000001 +219.4436000000009 +219.82130000000024 +219.6764500000008 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:36:31,455 INFO [tetris] Final score: 182 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 07:36:32,063 INFO [tetris] Final score: 145 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:36:33,248 INFO [tetris] Final score: 215 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:36:34,559 INFO [tetris] Final score: 174 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 07:36:35,775 INFO [tetris] Final score: 216 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:36:37,055 INFO [tetris] Final score: 191 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:36:38,287 INFO [tetris] Final score: 239 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-21 07:36:39,472 INFO [tetris] Final score: 167 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:36:40,543 INFO [tetris] Final score: 250 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 07:36:42,127 INFO [tetris] Final score: 235 diff --git a/qlearning-results/a0.9-g0.5-e0.9-approximateqlearning b/qlearning-results/a0.9-g0.5-e0.9-approximateqlearning new file mode 100644 index 0000000..e5236ad --- /dev/null +++ b/qlearning-results/a0.9-g0.5-e0.9-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 22:20:47,839 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 0.5, exploration_rate = 0.9 +208.04999999999995 +208.609 +207.78050000000042 +208.00000000000014 +210.13800000000018 +210.38000000000008 +208.56700000000032 +209.25849999999997 +208.56649999999988 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 22:21:28,270 INFO [tetris] Final score: 194 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:21:29,646 INFO [tetris] Final score: 193 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 22:21:30,510 INFO [tetris] Final score: 213 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:21:31,598 INFO [tetris] Final score: 222 +Lost due to: 
BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:21:32,478 INFO [tetris] Final score: 170 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 22:21:33,342 INFO [tetris] Final score: 212 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:21:35,166 INFO [tetris] Final score: 261 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-20 22:21:36,654 INFO [tetris] Final score: 226 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 22:21:37,870 INFO [tetris] Final score: 228 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:21:38,639 INFO [tetris] Final score: 201 diff --git a/qlearning-results/a0.9-g0.5-e0.9-qlearning b/qlearning-results/a0.9-g0.5-e0.9-qlearning new file mode 100644 index 0000000..63859f0 --- /dev/null +++ b/qlearning-results/a0.9-g0.5-e0.9-qlearning @@ -0,0 +1,30 @@ +2020-04-21 07:36:58,638 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 0.5, exploration_rate = 0.9 +223.6104500000002 +222.78740000000064 +221.90855 +222.23975000000084 +221.89455000000146 +222.18560000000093 +221.7105000000009 +221.57145000000057 +221.6627000000013 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:43:58,065 INFO [tetris] Final score: 247 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:44:00,017 INFO [tetris] Final score: 207 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:44:01,249 INFO [tetris] Final score: 228 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-21 07:44:02,400 INFO [tetris] Final score: 193 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:44:03,296 INFO [tetris] Final score: 146 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:44:05,073 INFO [tetris] Final score: 166 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:44:07,248 INFO [tetris] Final score: 292 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-21 07:44:08,000 INFO [tetris] Final score: 195 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:44:09,985 INFO [tetris] Final score: 277 +Lost due to: LockOut +2020-04-21 07:44:11,553 INFO [tetris] Final score: 196 diff --git a/qlearning-results/a0.9-g0.9-e0.1-approximateqlearning b/qlearning-results/a0.9-g0.9-e0.1-approximateqlearning new file mode 100644 index 0000000..3c56966 --- /dev/null +++ b/qlearning-results/a0.9-g0.9-e0.1-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 22:14:45,133 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 0.9, exploration_rate = 0.1 +203.64300000000046 +203.43700000000038 +203.12250000000026 +203.01400000000035 +224.54000000000008 +223.46150000000014 +203.1155000000002 +202.57250000000056 +223.0430000000007 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:15:25,382 INFO [tetris] Final score: 204 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:15:25,558 INFO [tetris] Final score: 196 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:15:25,750 INFO [tetris] Final score: 194 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:15:25,958 INFO [tetris] Final score: 206 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:15:26,150 INFO [tetris] Final score: 200 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:15:26,326 INFO [tetris] Final score: 212 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:15:26,502 INFO [tetris] Final score: 184 +Lost due to: BlockOut(Position { x: 4, y: 20 }) 
+2020-04-20 22:15:26,710 INFO [tetris] Final score: 218 +Lost due to: LockOut +2020-04-20 22:15:26,902 INFO [tetris] Final score: 204 +Lost due to: LockOut +2020-04-20 22:15:27,111 INFO [tetris] Final score: 186 diff --git a/qlearning-results/a0.9-g0.9-e0.1-qlearning b/qlearning-results/a0.9-g0.9-e0.1-qlearning new file mode 100644 index 0000000..23ee316 --- /dev/null +++ b/qlearning-results/a0.9-g0.9-e0.1-qlearning @@ -0,0 +1,30 @@ +2020-04-21 06:23:37,096 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 0.9, exploration_rate = 0.1 +220.11135000000203 +217.2223000000016 +215.9231500000015 +215.70630000000173 +215.53240000000181 +215.14065000000238 +214.87240000000259 +214.9684999999994 +214.60090000000287 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-21 06:31:12,948 INFO [tetris] Final score: 214 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 06:31:13,604 INFO [tetris] Final score: 243 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 06:31:14,197 INFO [tetris] Final score: 187 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 06:31:14,740 INFO [tetris] Final score: 184 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 06:31:15,333 INFO [tetris] Final score: 208 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 06:31:16,068 INFO [tetris] Final score: 191 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 06:31:16,596 INFO [tetris] Final score: 190 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 06:31:17,477 INFO [tetris] Final score: 247 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-21 06:31:18,037 INFO [tetris] Final score: 158 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 06:31:18,820 INFO [tetris] Final score: 235 diff --git a/qlearning-results/a0.9-g0.9-e0.25-approximateqlearning b/qlearning-results/a0.9-g0.9-e0.25-approximateqlearning new file mode 100644 index 0000000..caf5bfc --- /dev/null +++ b/qlearning-results/a0.9-g0.9-e0.25-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 22:15:27,128 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 0.9, exploration_rate = 0.25 +199.9570000000003 +200.65800000000064 +200.2209999999998 +204.64649999999997 +202.45350000000005 +199.93400000000045 +202.02249999999927 +203.52300000000037 +201.23650000000026 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-20 22:15:56,578 INFO [tetris] Final score: 185 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:15:57,778 INFO [tetris] Final score: 193 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:15:59,475 INFO [tetris] Final score: 203 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:16:01,651 INFO [tetris] Final score: 236 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 22:16:02,130 INFO [tetris] Final score: 179 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 22:16:04,370 INFO [tetris] Final score: 190 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 22:16:05,074 INFO [tetris] Final score: 204 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 22:16:05,698 INFO [tetris] Final score: 164 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:16:06,754 INFO [tetris] Final score: 197 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:16:07,795 INFO [tetris] Final score: 192 diff --git a/qlearning-results/a0.9-g0.9-e0.25-qlearning b/qlearning-results/a0.9-g0.9-e0.25-qlearning new 
file mode 100644 index 0000000..52f9bc2 --- /dev/null +++ b/qlearning-results/a0.9-g0.9-e0.25-qlearning @@ -0,0 +1,30 @@ +2020-04-21 06:31:29,931 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 0.9, exploration_rate = 0.25 +222.0286000000032 +220.3080500000027 +219.79000000000102 +219.1303000000014 +218.95010000000215 +218.40280000000223 +218.49035000000254 +218.78120000000075 +218.15075000000294 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 06:39:35,215 INFO [tetris] Final score: 213 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-21 06:39:36,095 INFO [tetris] Final score: 183 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 06:39:37,791 INFO [tetris] Final score: 224 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 06:39:39,342 INFO [tetris] Final score: 195 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-21 06:39:39,935 INFO [tetris] Final score: 189 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 06:39:40,751 INFO [tetris] Final score: 220 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-21 06:39:42,606 INFO [tetris] Final score: 234 +Lost due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-21 06:39:43,183 INFO [tetris] Final score: 205 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-21 06:39:44,191 INFO [tetris] Final score: 218 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 06:39:44,911 INFO [tetris] Final score: 205 diff --git a/qlearning-results/a0.9-g0.9-e0.5-approximateqlearning b/qlearning-results/a0.9-g0.9-e0.5-approximateqlearning new file mode 100644 index 0000000..03b692c --- /dev/null +++ b/qlearning-results/a0.9-g0.9-e0.5-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 22:16:07,806 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 0.9, exploration_rate = 0.5 +197.50150000000022 +197.6740000000003 +194.29700000000022 +195.83249999999992 +195.94850000000008 +195.7474999999998 +196.79299999999984 +194.7880000000001 +194.89900000000006 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:16:44,941 INFO [tetris] Final score: 248 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:16:45,964 INFO [tetris] Final score: 172 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:16:46,781 INFO [tetris] Final score: 190 +Lost due to: LockOut +2020-04-20 22:16:47,613 INFO [tetris] Final score: 248 +Lost due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-20 22:16:48,653 INFO [tetris] Final score: 218 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:16:49,117 INFO [tetris] Final score: 169 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 22:16:49,836 INFO [tetris] Final score: 124 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:16:50,748 INFO [tetris] Final score: 200 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:16:51,709 INFO [tetris] Final score: 154 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:16:52,220 INFO [tetris] Final score: 164 diff --git a/qlearning-results/a0.9-g0.9-e0.5-qlearning b/qlearning-results/a0.9-g0.9-e0.5-qlearning new file mode 100644 index 0000000..53c127f --- /dev/null +++ b/qlearning-results/a0.9-g0.9-e0.5-qlearning @@ -0,0 +1,30 @@ +2020-04-21 06:39:57,620 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 0.9, exploration_rate = 0.5 +223.4274500000022 +222.68109999999945 +222.1315500000021 +221.90930000000327 
+221.0418500000017 +221.2486500000039 +220.81480000000346 +220.83155000000053 +221.01915000000164 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 06:48:01,447 INFO [tetris] Final score: 240 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 06:48:03,240 INFO [tetris] Final score: 242 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 06:48:04,199 INFO [tetris] Final score: 225 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 06:48:05,335 INFO [tetris] Final score: 238 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 06:48:06,791 INFO [tetris] Final score: 175 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 06:48:07,191 INFO [tetris] Final score: 149 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 06:48:08,263 INFO [tetris] Final score: 175 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 06:48:09,511 INFO [tetris] Final score: 257 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 06:48:10,471 INFO [tetris] Final score: 183 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 06:48:11,848 INFO [tetris] Final score: 281 diff --git a/qlearning-results/a0.9-g0.9-e0.75-approximateqlearning b/qlearning-results/a0.9-g0.9-e0.75-approximateqlearning new file mode 100644 index 0000000..f08b67f --- /dev/null +++ b/qlearning-results/a0.9-g0.9-e0.75-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 22:16:52,233 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 0.9, exploration_rate = 0.75 +199.4635000000001 +199.6769999999999 +199.09749999999994 +199.1510000000005 +201.18500000000063 +198.3290000000002 +198.23250000000021 +199.40199999999984 +199.57700000000008 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:17:24,323 INFO [tetris] Final score: 211 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 22:17:25,139 INFO [tetris] Final score: 190 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:17:25,556 INFO [tetris] Final score: 195 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:17:26,179 INFO [tetris] Final score: 208 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:17:26,915 INFO [tetris] Final score: 216 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 22:17:27,459 INFO [tetris] Final score: 209 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:17:28,003 INFO [tetris] Final score: 198 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:17:28,451 INFO [tetris] Final score: 187 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:17:29,395 INFO [tetris] Final score: 153 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:17:29,811 INFO [tetris] Final score: 199 diff --git a/qlearning-results/a0.9-g0.9-e0.75-qlearning b/qlearning-results/a0.9-g0.9-e0.75-qlearning new file mode 100644 index 0000000..2563fd2 --- /dev/null +++ b/qlearning-results/a0.9-g0.9-e0.75-qlearning @@ -0,0 +1,30 @@ +2020-04-21 06:48:26,417 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 0.9, exploration_rate = 0.75 +224.21695000000113 +223.70885000000177 +223.17375000000146 +223.02130000000142 +222.82329999999973 +222.55980000000082 +222.69185000000226 +222.44839999999962 +222.01355000000092 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 06:56:06,260 INFO [tetris] Final score: 220 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 06:56:07,652 INFO [tetris] Final score: 198 +Lost 
due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-21 06:56:08,868 INFO [tetris] Final score: 206 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 06:56:10,548 INFO [tetris] Final score: 234 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-21 06:56:11,492 INFO [tetris] Final score: 204 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-21 06:56:12,613 INFO [tetris] Final score: 213 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 06:56:14,388 INFO [tetris] Final score: 209 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 06:56:15,348 INFO [tetris] Final score: 225 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-21 06:56:17,460 INFO [tetris] Final score: 233 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-21 06:56:18,517 INFO [tetris] Final score: 178 diff --git a/qlearning-results/a0.9-g0.9-e0.9-approximateqlearning b/qlearning-results/a0.9-g0.9-e0.9-approximateqlearning new file mode 100644 index 0000000..0088f09 --- /dev/null +++ b/qlearning-results/a0.9-g0.9-e0.9-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 22:17:29,828 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 0.9, exploration_rate = 0.9 +208.85200000000037 +207.66850000000048 +206.61900000000006 +206.74500000000037 +209.90700000000004 +206.0430000000001 +208.84249999999983 +207.04249999999982 +207.14950000000013 +Lost due to: BlockOut(Position { x: 6, y: 19 }) +2020-04-20 22:18:07,730 INFO [tetris] Final score: 287 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:18:08,882 INFO [tetris] Final score: 195 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:18:09,890 INFO [tetris] Final score: 217 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:18:11,123 INFO [tetris] Final score: 240 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 22:18:12,114 INFO [tetris] Final score: 209 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:18:12,914 INFO [tetris] Final score: 174 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:18:13,507 INFO [tetris] Final score: 192 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:18:14,690 INFO [tetris] Final score: 252 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 22:18:15,971 INFO [tetris] Final score: 203 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 22:18:16,707 INFO [tetris] Final score: 192 diff --git a/qlearning-results/a0.9-g0.9-e0.9-qlearning b/qlearning-results/a0.9-g0.9-e0.9-qlearning new file mode 100644 index 0000000..801a890 --- /dev/null +++ b/qlearning-results/a0.9-g0.9-e0.9-qlearning @@ -0,0 +1,30 @@ +2020-04-21 06:56:34,782 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 0.9, exploration_rate = 0.9 +224.57260000000076 +223.3695500000009 +224.40370000000092 +224.05635000000188 +223.88540000000106 +224.18705000000156 +223.52310000000045 +223.81590000000122 +223.75850000000077 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:03:45,094 INFO [tetris] Final score: 241 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 07:03:46,486 INFO [tetris] Final score: 197 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:03:48,166 INFO [tetris] Final score: 229 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-21 07:03:50,102 INFO [tetris] Final score: 286 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:03:51,046 INFO [tetris] Final score: 190 +Lost due 
to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 07:03:51,830 INFO [tetris] Final score: 195 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:03:53,733 INFO [tetris] Final score: 287 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 07:03:54,597 INFO [tetris] Final score: 209 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:03:55,718 INFO [tetris] Final score: 206 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 07:03:57,270 INFO [tetris] Final score: 240 diff --git a/qlearning-results/a0.9-g1.0-e0.1-approximateqlearning b/qlearning-results/a0.9-g1.0-e0.1-approximateqlearning new file mode 100644 index 0000000..15d1d84 --- /dev/null +++ b/qlearning-results/a0.9-g1.0-e0.1-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 21:35:02,976 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 1, exploration_rate = 0.1 +343.0160000000001 +374.1515000000009 +371.97900000000055 +375.69750000000033 +377.08049999999946 +373.4639999999987 +374.8835000000006 +371.55649999999997 +373.81750000000017 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:57:40,169 INFO [tetris] Final score: 417 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 21:58:12,489 INFO [tetris] Final score: 428 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 21:58:28,729 INFO [tetris] Final score: 384 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:58:42,937 INFO [tetris] Final score: 295 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:59:11,369 INFO [tetris] Final score: 321 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 21:59:38,073 INFO [tetris] Final score: 317 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 21:59:56,201 INFO [tetris] Final score: 335 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 22:00:28,905 INFO [tetris] Final score: 437 +Lost due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-20 22:00:47,177 INFO [tetris] Final score: 341 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:01:11,673 INFO [tetris] Final score: 369 diff --git a/qlearning-results/a0.9-g1.0-e0.1-qlearning b/qlearning-results/a0.9-g1.0-e0.1-qlearning new file mode 100644 index 0000000..10e57da --- /dev/null +++ b/qlearning-results/a0.9-g1.0-e0.1-qlearning @@ -0,0 +1,30 @@ +2020-04-21 05:25:49,309 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 1, exploration_rate = 0.1 +219.4535000000012 +220.30670000000012 +222.87990000000056 +224.77710000000152 +223.73930000000172 +222.64170000000138 +225.08065000000178 +225.3609500000005 +225.33345000000105 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 05:42:28,489 INFO [tetris] Final score: 207 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-21 05:42:30,185 INFO [tetris] Final score: 199 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 05:42:32,426 INFO [tetris] Final score: 287 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 05:42:35,433 INFO [tetris] Final score: 271 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-21 05:42:37,881 INFO [tetris] Final score: 211 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 05:42:39,145 INFO [tetris] Final score: 232 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-21 05:42:40,905 INFO [tetris] Final score: 207 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 05:42:43,513 INFO [tetris] Final score: 174 +Lost due to: 
BlockOut(Position { x: 5, y: 19 }) +2020-04-21 05:42:46,217 INFO [tetris] Final score: 197 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 05:42:48,633 INFO [tetris] Final score: 206 diff --git a/qlearning-results/a0.9-g1.0-e0.25-approximateqlearning b/qlearning-results/a0.9-g1.0-e0.25-approximateqlearning new file mode 100644 index 0000000..6a3c934 --- /dev/null +++ b/qlearning-results/a0.9-g1.0-e0.25-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 22:01:11,684 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 1, exploration_rate = 0.25 +280.8345000000005 +297.03800000000035 +296.62900000000025 +297.59599999999983 +299.1450000000002 +296.5395 +296.73250000000064 +296.8154999999996 +295.2065000000003 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:07:43,232 INFO [tetris] Final score: 266 +Lost due to: BlockOut(Position { x: 6, y: 19 }) +2020-04-20 22:07:49,328 INFO [tetris] Final score: 245 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 22:07:53,103 INFO [tetris] Final score: 265 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:07:58,143 INFO [tetris] Final score: 284 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:08:04,368 INFO [tetris] Final score: 250 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 22:08:09,695 INFO [tetris] Final score: 277 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:08:19,152 INFO [tetris] Final score: 353 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:08:30,304 INFO [tetris] Final score: 367 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:08:40,560 INFO [tetris] Final score: 374 +Lost due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-20 22:08:45,663 INFO [tetris] Final score: 290 diff --git a/qlearning-results/a0.9-g1.0-e0.25-qlearning b/qlearning-results/a0.9-g1.0-e0.25-qlearning new file mode 100644 index 0000000..5cfcf67 --- /dev/null +++ b/qlearning-results/a0.9-g1.0-e0.25-qlearning @@ -0,0 +1,30 @@ +2020-04-21 05:43:05,114 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 1, exploration_rate = 0.25 +221.42935000000287 +223.57179999999948 +222.4067000000011 +223.67315000000272 +222.3455500000012 +224.16825000000145 +223.38455000000144 +222.16165000000075 +222.65675000000027 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 05:55:46,939 INFO [tetris] Final score: 267 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 05:55:48,811 INFO [tetris] Final score: 245 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-21 05:55:49,899 INFO [tetris] Final score: 151 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 05:55:51,371 INFO [tetris] Final score: 216 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 05:55:52,796 INFO [tetris] Final score: 204 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 05:55:54,076 INFO [tetris] Final score: 250 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 05:55:55,323 INFO [tetris] Final score: 167 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 05:55:58,508 INFO [tetris] Final score: 296 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 05:56:00,491 INFO [tetris] Final score: 214 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 05:56:01,772 INFO [tetris] Final score: 246 diff --git a/qlearning-results/a0.9-g1.0-e0.5-approximateqlearning b/qlearning-results/a0.9-g1.0-e0.5-approximateqlearning new file 
mode 100644 index 0000000..e54c010 --- /dev/null +++ b/qlearning-results/a0.9-g1.0-e0.5-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 22:08:45,677 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 1, exploration_rate = 0.5 +250.91950000000043 +256.9545 +256.5619999999999 +257.13050000000015 +257.37300000000016 +256.96200000000056 +259.95699999999965 +255.26400000000027 +254.34550000000013 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:11:17,118 INFO [tetris] Final score: 235 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 22:11:19,645 INFO [tetris] Final score: 255 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 22:11:22,814 INFO [tetris] Final score: 312 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:11:26,253 INFO [tetris] Final score: 319 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:11:30,813 INFO [tetris] Final score: 225 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:11:33,229 INFO [tetris] Final score: 224 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:11:36,285 INFO [tetris] Final score: 232 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 22:11:37,870 INFO [tetris] Final score: 247 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:11:41,550 INFO [tetris] Final score: 276 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 22:11:44,846 INFO [tetris] Final score: 253 diff --git a/qlearning-results/a0.9-g1.0-e0.5-qlearning b/qlearning-results/a0.9-g1.0-e0.5-qlearning new file mode 100644 index 0000000..f6e9f30 --- /dev/null +++ b/qlearning-results/a0.9-g1.0-e0.5-qlearning @@ -0,0 +1,30 @@ +2020-04-21 05:56:18,574 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 1, exploration_rate = 0.5 +223.14630000000096 +222.4662000000006 +222.19985000000216 +221.45400000000015 +221.35775000000123 +221.61499999999967 +221.8509499999996 +221.81945000000104 +220.82925000000114 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 06:06:21,197 INFO [tetris] Final score: 253 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 06:06:23,725 INFO [tetris] Final score: 247 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-21 06:06:24,509 INFO [tetris] Final score: 195 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 06:06:25,853 INFO [tetris] Final score: 254 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 06:06:27,149 INFO [tetris] Final score: 193 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-21 06:06:29,086 INFO [tetris] Final score: 262 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 06:06:30,397 INFO [tetris] Final score: 204 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 06:06:32,925 INFO [tetris] Final score: 227 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 06:06:35,469 INFO [tetris] Final score: 315 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 06:06:36,909 INFO [tetris] Final score: 200 diff --git a/qlearning-results/a0.9-g1.0-e0.75-approximateqlearning b/qlearning-results/a0.9-g1.0-e0.75-approximateqlearning new file mode 100644 index 0000000..dfb68a0 --- /dev/null +++ b/qlearning-results/a0.9-g1.0-e0.75-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 22:11:44,860 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 1, exploration_rate = 0.75 +233.65150000000003 +235.6294999999998 +235.5679999999999 
+237.1415 +236.70450000000005 +235.77050000000003 +235.82599999999977 +236.3489999999998 +235.507 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 22:13:07,975 INFO [tetris] Final score: 215 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 22:13:09,718 INFO [tetris] Final score: 256 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 22:13:11,542 INFO [tetris] Final score: 299 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:13:14,598 INFO [tetris] Final score: 270 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 22:13:16,630 INFO [tetris] Final score: 250 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:13:18,215 INFO [tetris] Final score: 229 +Lost due to: BlockOut(Position { x: 6, y: 19 }) +2020-04-20 22:13:20,711 INFO [tetris] Final score: 244 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:13:23,063 INFO [tetris] Final score: 256 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:13:24,806 INFO [tetris] Final score: 247 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 22:13:26,999 INFO [tetris] Final score: 278 diff --git a/qlearning-results/a0.9-g1.0-e0.75-qlearning b/qlearning-results/a0.9-g1.0-e0.75-qlearning new file mode 100644 index 0000000..fe4ff8d --- /dev/null +++ b/qlearning-results/a0.9-g1.0-e0.75-qlearning @@ -0,0 +1,30 @@ +2020-04-21 06:06:53,850 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 1, exploration_rate = 0.75 +223.03950000000032 +222.67955000000146 +222.60880000000236 +222.78549999999882 +222.02130000000193 +222.8154000000009 +222.58920000000154 +222.558650000001 +222.3293000000018 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-21 06:15:09,132 INFO [tetris] Final score: 207 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 06:15:10,556 INFO [tetris] Final score: 164 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 06:15:12,268 INFO [tetris] Final score: 237 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 06:15:13,724 INFO [tetris] Final score: 201 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 06:15:15,307 INFO [tetris] Final score: 238 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-21 06:15:18,812 INFO [tetris] Final score: 332 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 06:15:20,252 INFO [tetris] Final score: 170 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 06:15:22,140 INFO [tetris] Final score: 240 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 06:15:24,043 INFO [tetris] Final score: 261 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-21 06:15:25,772 INFO [tetris] Final score: 233 diff --git a/qlearning-results/a0.9-g1.0-e0.9-approximateqlearning b/qlearning-results/a0.9-g1.0-e0.9-approximateqlearning new file mode 100644 index 0000000..95f49f9 --- /dev/null +++ b/qlearning-results/a0.9-g1.0-e0.9-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 22:13:27,013 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 1, exploration_rate = 0.9 +226.13749999999987 +227.05799999999994 +226.83550000000014 +227.24349999999953 +228.87400000000008 +227.81849999999966 +227.3485000000002 +227.08050000000034 +228.37950000000035 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:14:30,750 INFO [tetris] Final score: 206 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 22:14:32,110 INFO [tetris] Final score: 234 +Lost due to: 
BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:14:32,862 INFO [tetris] Final score: 166 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:14:35,134 INFO [tetris] Final score: 332 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:14:36,654 INFO [tetris] Final score: 250 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:14:38,702 INFO [tetris] Final score: 172 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:14:39,807 INFO [tetris] Final score: 226 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:14:41,582 INFO [tetris] Final score: 207 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:14:43,502 INFO [tetris] Final score: 218 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 22:14:45,118 INFO [tetris] Final score: 286 diff --git a/qlearning-results/a0.9-g1.0-e0.9-qlearning b/qlearning-results/a0.9-g1.0-e0.9-qlearning new file mode 100644 index 0000000..152fd0e --- /dev/null +++ b/qlearning-results/a0.9-g1.0-e0.9-qlearning @@ -0,0 +1,30 @@ +2020-04-21 06:15:43,097 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 1, exploration_rate = 0.9 +224.08180000000112 +223.73260000000244 +223.56205000000148 +223.0561499999997 +223.4469000000014 +223.3565500000009 +223.6488000000011 +223.41785000000053 +223.7366000000004 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 06:23:07,871 INFO [tetris] Final score: 239 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 06:23:09,871 INFO [tetris] Final score: 214 +Lost due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-21 06:23:11,823 INFO [tetris] Final score: 235 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 06:23:12,575 INFO [tetris] Final score: 179 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 06:23:13,647 INFO [tetris] Final score: 166 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 06:23:14,703 INFO [tetris] Final score: 234 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 06:23:15,454 INFO [tetris] Final score: 188 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-21 06:23:16,911 INFO [tetris] Final score: 225 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 06:23:18,271 INFO [tetris] Final score: 228 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 06:23:19,327 INFO [tetris] Final score: 186 diff --git a/qlearning-results/test b/qlearning-results/test new file mode 100755 index 0000000..4ebb3df --- /dev/null +++ b/qlearning-results/test @@ -0,0 +1,21 @@ +#!/usr/bin/env bash +set -euo pipefail + +echo "Warning: high epsilon values will take up to 30GB to run!" +echo "It is a good idea to disable these values."
+ +for a in 0.1 0.5 0.7 0.9; do + for g in 1.0 0.9 0.5 0.1; do + for e in 0.1 0.25 0.5 0.75 0.9; do + for agent in qlearning approximateqlearning; do + echo "alpha $a gamma $g epsilon $e agent $agent" + file="a$a-g$g-e$e-$agent" + if [ -f $file ]; then + echo "skipping $file, exists" + else + ./tetris train $agent -a $a -g $g -e $e -n 200000 >> "$file" + fi + done + done + done +done diff --git a/qlearning-results.tar b/qlearning-results/tetris old mode 100644 new mode 100755 similarity index 93% rename from qlearning-results.tar rename to qlearning-results/tetris index 837e58ccd4eeaa751dd771d94cab5589b57a92ea..a811df82c3b4bfb5c4622ef91fb853c0be777159 GIT binary patch delta 229 zcmWN>SvCRy06@{BL4`C570S>=0}?7pDeS7Rw~o$f4=kf)__cudk^6BMe?NicdKJJe z{6B~x4C6LNFp4qU!ClLciP+`~OQ{Q4F*5|!65>}GV_d(u5T z&iX(9^Y8wj|Ks_;{{Q~3=fChjzkU7ci_hMC_UX$nUcdkH?T7c@{_1~y{`bH7-=2TN z{O`A)J$qL18T(%TS&x0?{!{*we4f?q(2YYgc3u6fs=KPMpWXESkF7uYlRx+6`wy?a zxVd@u`uqRt)feCAw!imN_Fs46&7Tqb=k)B^gN9b*C5C^=;pc&#I=WtM=JV^|!w`_P(i_>Sfiv zY^s~4xv%^CX1uMN>E`$U;5*;Fx%uM{uRnb8=KcNstM7ey_XYp7%lng?|NZ~|wO{@R z3!89z^ZhsPzxVFT&p!N%FW7;b-?>p2U;p^?x9_;{=G|x8^`?JO&-2~fkFs|4XM9CTic-&YW)E<&sAfKHX;LhTE|ZFKDr^5&Gb~Al{p%IMR(V=uh#{%Pko00m#{Q+f!_oaO3M@#!T9Bcwn-=pp4t4W17No82+wpd2=l%Ih zT97(qK{$Fr3o@e6o91q;CZoRV8GF&xcSF;4Mt%1Km#`}1SR1`RVnevpHrDjWThJ_A z8=GNTzgdrsx~+2U3b)ns-7uiu*H5o6O>JW&HnqB`?kDxUxUcD(f+M-yM^w8U{&wR3 zZpSwLlCk&#K}L>esupGV5!6+;W0TRig(gJnCr^--)9m!%qC5=WgtSqOyHkW(ZNvn% zw{z3%&!I18XqTd`iz`Cex#70W35qUg+4&Rcv(SyV?cAnUR3CIFmYqMb9=dM2<@_cE z)zewcE;~2hE_Kc*4oBRH$XV#C+kOt3ki~jpr(Pp-y8HUJ?S|yeH8{b>emr~j)n7jU zWzSOB*vPy76HyCN-&JcUa;*h1G00{?h`F912C43=`Mzpzr)J!5;!z8NQ3=`)Hec+A zxaw{wfqoyc9!8sYSb=&Xw%dY>ZsyW>*ALbR)Du>w9+$hm!(6PEVUlkScWqk{=;+U& zFA;*+-*5RV>i({u#@d*aQGTqySRa_nYaf!S2kvOblWK z4Q<+cBFHLIx0{2z_iweF?sjtJ1Y?l0@NEqZE|rEBG~Cv8dO@NEi(-&in5YGfw^N-q zy*N9uo!iGC>8+b?n>j70?urhbbLPAlM6U?1rSjfj@6grL5gUIQgJ?PPZM%$~v*`(H zhNCMA9J;o;oxAXYYz)%Wk8rx%`gZO!hYnYEGa%27#2~%34{i+7+Zd!y<+8q!jYPV- zUL%pNYFlz%sK;S^8avXW`sUld_l$AUj_{UV=CWdu2o8VM-_?C>RDXq@m&DSvrLrXZ z)ZL9HB#@{;ZtCxb+S&q%LL|Ah4BKr3#z4MUtNc}aH}%#|U~1HVtLwp-4kcLr6cZ9v zRbS>bxqdB?(3ZX;jGOO!G;QtepZ{{j^-{o7552`Ahm1uvEomE77bWBzz2v{eS`a5P zB4=3>a`QnC+H;`w&h@ul6ZAJXchlcF?1;)R@^t;@K&GWlyW{u_G5b*Z92WH;rY56& z3UgGk&`fX7a9eeLP`h}E(RtdhD84wGM}kk8E=AJVygZrpz#j>ccx>8Vu` z-t5oVW?sCydf{Y=VlhMP`p^8spe0b*s9Iu)I+%?5ByR%x(N9M8&69)@&3!lC*X?cR zzwt>E5KKnJf(c9j>7%)yjCQUEvP=XH^-Q)%f5b(Uh+v*tqu(j8s7vIc=GA!Ay+j1d zaMw(oDMH}GN<=V>mAOj-38IFfvcII{5jD(>iB#%Ve!RPzCR^aBOI6wu&6M{&2H|$j z=9SCLPAuKKG}r>kMD_kN^Io%>Fl(3Y4JilnV`Cpv`9|ZLA6t^eo~Uxrf|}cE$atc) zr_%hQ(xR(Mi9&*GPjlPlGJ;oHgJmlPiXg)9I-^%P$-C#^uc&B2n17wtTDF>__rN*P zk05Bc9g+(o8QZkxFq}CHBhf-8&f?9z*ttKttT1K{-At{=H>7(zDxSenL-8_O^*&Lo z7_@!p$+k3&DpoWfKSZc+s^Pwy?wjGDeK=I^R>g|$4)t#|cwJNPZd$2dY$XS(&s?g< z7Vv}m)E%myOT$>@p|Evv9{uCE;32sogY{I^cJ|aBhudbzB%7KtPcs>MaEZM)xm}JZK`*OiR#exwcngsj zNE<{cj0MlavPVCJE9+KJgSO38E6V%@vvbQ1{a^)AS)c4Piaeqd$tEkOUr6p}1d(43 zZk@MtdPT8EG~RZ>w`kdLBTai`>tceYyr<`63jiz>6tozWo%JJ{$Ty~UPGBVO&W_} zQi@0hm123goby2m}pP#8+MnQn(O(nIA@xg~;qjv0-0MfEMX*|ef4 zD3hG=qKnS#n4{KE4@w2d)XH@gnW>nv~n)bRN5{WJvAcSuy&gg~{x>gX% zF!_kcWxx30^;>6l4o@X>=x|IA8I9X;e6d*!;tLyrIM(P&G%d|0AHN@?zOOndVBlQs zAJXI5iWLntHI)5SOn_ws=R{GXw=g731PLktQru*WMBV8+N8b{D>5ru3n}yoP1P*t- zR?SkvkiZ4gvEC+i4&oL7QpT1@v71ZNJZ(40&vR+$Z2qe#n?nLO<|ox`<$crf4oCFt zs%E3f56?H(ay+FZXj(0Z3g?hf7DX$Hmn4mxk-x#T>x9v?g}9W6;aC?0Bs5opZpJb+ ztrm1)3ay|nBdCrVjzTo8-n#jgb7{9Oh(m75ycHX#`Dj`t2<#A$khGxoT!^dX)VB-$ zacMz^;az(eg%;G(Z4?Zj95n4oe6SYO-!{3Oqn&F5Lp=(bR*K)cvF}O?QW+GKMbq|V zsGh)rP?%I5POYEtJ#Il_C2)hM?Rs02rYND>C0e7-iMhB$Ua6T)wyMUN6%9Xhl_^~` 
zga(wri9Ci%tN%{w6WVMv>!rH08rM#&YM7kiNC^j@B@!Sc|&k7nd%A69HD+dc6snevl zj>g2SVK2$(SP#Op>=<^nb<}BS6HV&adO8t$iU(l?%~LvnYUo=nc|_Jcc}X`dM{o;U zh#U4GPC{snAo_ljI|qF7P0@;C9)uP|J;}8Ye0U7BSpH$6zgii}T?zD8an)oniCBdE)r1zeUb<~Y=V z5r*B4(tvRacH7hNtte8+?emfgtcvIV(PIvv65V~LSwwtlfTRH#L*?*T%(235IS(d< zB+b_ofrCwB)sw^*Sbh_dn&^&Xr>`?r)-xC4v~5~k+C;|aMi~g0BMi~Em@wIu*$H4sC9(9i=A$dMDVd} zgfbaHn2%hi`4!2&t@FpO2#j&=EL`8|@KR83>dcYdZl@vB`9t-w{Lpz}n-$c7F^;(s zw(}?Pj4mEg0nRrFU+ij1#UMZV?F0SLK2{6Jky`G%p}MlOdHHy2p<|6x8b4#y@L7 zX!jeW_nQc${rGir3_fLEa`w`&??u{40}_ltVi+rlH)yy9c5WgN8m`HC+yRd@S6Dxn zgmKDdlmz7f?+Cf6-YET=#-^sifONC9AC)u%avMFeHsDa-q_}P?8*3~1MEjwsvjD-u z8u7&%0Esb1MS9&}Y`hbxHnYWm^mIWu`Zk2I@jLP&S$wTDd^w9eH4y~EQKnu{#}hHT zs47^Ww+oV-oP#DZwUBWNEc?Lv)6W1FMvm$}2Wgbxz&%+cL5J%8F zJK4X$S>W9`uyb-2+!U4-M5BuT2K!qVYNGR-mSSDqR!)Nf3750C@U0LCpbdzBy4S-^ zKV=;u1=tKoAr(*Zba<lj~FO=W7i}0;{WPA*Zprt;OmNlF1^UHGcd|mO^Is^*zk0 z7frB9I}!{;VxQIoA_kPE??y_>cQ$BZ?QyHHW~E>u6I5TOcz zQI_&LM!^24E`Y$ANE@yB0u+XF7v)!b3D;aMxosvS!;a{{eS?RSyR1 zIK&Exu~^g!>TAbm2^roWookg^1;MA%h65c#w+pqzk^1@1g|k+wey++Ma}caslC@eJ zJD}wo=?GLo$H&iF;Yv*RHEbg^cDR*Mc|=KL5v-fX${qsDZE3l%MRFB8NV7LK(&&0g zT4}y>b4A#pxJ1$1lsJT|mMcr!vuA6D3bc8EO0&)7K+e}YBsON&-;WM3J>VYjHsLfgow{hxl_ka>4a>!Enq#cXz&6d)% zgh2gP5QGL1vTXD6)@xVbk|DPbTMR2m0Kpk`1Vekms4s>^KPw1icsd^cx z@5HRAp#?vu_n`bttjI2CON}}3St|#3s3A+bxHA^~vv3A+Wd*hH-^Cx%BJ&5D0%oiz zF_slX%{*I|a&fvUcaPd~G9e~uY+Iy`_41)<2X8IJQx<0q`+^;vO zy7{!+O)cEA>qkl)6WCkeyX!|0-$I7f+6!(^;_X=M9F~lr`F8Tj89aU4T+{l$7@|h~`VxZ*jx4Tl zmA*X~QSSm>D+l*z=x!F%_7mz|DIF{mPRJ^K#DWM5Ng1~Ku+26OeJ&0b4Ruptq%voUCMwKz_ zdvwM>j}BN18+J!ft}ZF@jS~_0fko*5|4BI5P~26jM1Fm?%9J*!Xb@#cwyOX5qyF$B zXf3C~k_K4cxF>A{n&y(TDpn$wY*lwxIXxFt$jM*P9&W9LP}Op&rvYgV85%`tDEx@C z=}4~>rD3}eM^yHkBve&0Y~0A!6@*JT6FbvUC@zo|Y4KWX>T9kx`+_Ygd54$ayQTM% zoE40pIs6WxPKlt1oJAfQfo@qyHat#g6sm>xRX!1^X;Bb>ubi<9W{_XQ$ zEDSb&Dq1362Q{4?voU%D|D;6z6a)X57b|pD=)OaCBoJ%FB8~b5_pAE9i^gT2gbJQreWx9O`H%# z{gP1K>W_eV<19FmGa6)87o3|(JqteFUr}&D(MPmE(kzLp-o?KKYRSA6MS>EKXoO)KwDCS6 zc9FE~h9hSoxN{U=CG(cHP`v80px0S1LL1$PAgJfY%nZgBxAtJj@e_%V1qlve3^ zkJLBn`xb~ZJ!d&Ky21_!tt!0pE}qh&1TnAGAEMjk58|hCVHDy5(@=LR9fbr0W|n_$HoFz67Oq4)3ea*eCK9 zyMW_ZeIq;@7KGLL*|4UwykjgyEf$1q%~N!i%S0NPZV&8f|Mn)6Rso1rEZC6FZwlmH zGu!esR6Z(01*@$oJ}8^QIE{+IO!`C1t=u$>8{?I<1uBSDXYo|heojV=5mB3!(fO&C z(8Yu)pho43^-vr0M73O98v4N)5NRHas}1K!#yZRKY1vKM0y8zYQ?oO9n-?vJAsQ~D zX~5e&TO!wTsPoP{Js9XLcj2;$^36^WI(1$pNTxhzsw&V5@Q6-ymTN%(3UkdCDI8o$ zXSo)H_06R0IkiQH&dFK5*mag`K|RCL(7lEDu|djhf^Hqi$`uNhpmg3(`LU`#S1KaSkx@ z>4HfnX+eS*tBeilXY1{&k*`D?10@F&aY(Zud?h41O{lW+Ax?wkQPlK?V5|ljyH-P^ zy3J)U2NST@OVaWOtH}h9!g`m!fcY@cEa@Vbdk8_yUr_80Mq&^eEJ3Rg8pORP1rC8k zzUUQ;zRRR55Y$og-OA-5xC^5civ$JpA{-69Ae57rf4+=C&Tw^EL4wlFEJ%ZAOJk5r z2Uo9XgqkF+G}1t!f*8aInsaBN5TdbrL~abC1yRMW_9>-{VCO10@P-CfawuZgl#7xW zD|&5<4qfzg%i$JhB3_qTF;>80$}LsoECA-BDQWLI`;k25qMZRbgt5w%J?0d|s$hx- zVaE!wf-APwoq-=e5>em-M$5Krr;n~q+K~fg4~pkljNTI4f}N7+fzI*H0CgG3Byj2OWMi zBd|C$4m5k{jhs|>d1hjg_NK2aV=6fd!yf4_XJ0ZeK$%Mze537x0Ji!S5Wm@@lx99H z2rz)d-5RosOk+`+UWa19in&z@^9y zGUA0O5|n&IAo|smu2i!J1<6McG$j|Ln>}dyE!n!{illawGb04qdo%K^uyihXtjIJd zB9S$hb<&QIY+3CHK3GjW0xaYSGFqDMh1Rf|VPswanY1H;cqA5$2nY*?bUN6L4i^

JYh_g1TKSCN9**O}%pmWHi zs|sllV>U`XA{Z08F*?R$Etwyhi5XiFELL$ry7iE>*gnoU(h?ZPTq#nLlWiW|TJHc+ z)bPF3`OoGN|3Cc)>|Y zdkADE#t=Yr#rLRpOuZ>w)U*t$nmLTP<~C_Pm!1)@`sT-!(W0}YwzU(*#m-%$H9cyI zVSx>aQ0bDmA2A?s+ruNB$)OhFMS>oA4HKdkM1{bolJPVlvrr1HVz^IU%K@P|IF*PX z6JlW^B&avc&PhJqhyHmBVyehFCPcL&@)K3(tSEpshz}6_!#xvX_!+H_2tiYuj+&0o zY4w;8^@`}i&JvT9orUy*D6=iO$w?VXo>Y!}X30b9FLLWoLRY*$iZ=%2Ux<{v>k*Wp*mfIeu#Ps2xe=sRkPS zw$b4Vcr{`mEbeM%F12KrD195d!axo#k%H%QCNc)du&uzqfa16od1W6F$;1qgRuT}EMP6A!IDKtwfvoaH9zNnh#0dEe0~D2Al2o+P6MM=Kpw(&g#J z-bW1yDXK^lh~g$8d(=6GLs5!!6bI}_$Z{!n&zr9Agm@Pkb{j*e0y~tt5LiLmY^o({ zx*$odYBLG7;v!)Zy1i$R-y)|4O{V#1IutuVoy&#%uuIToQ!g8PNjnnEW$B}N#$y8j zgULb=72rJ9tE{*Zf2lu0z@NQLu^(X_Z=iAd&ds3{eYkr@f$r(TPSW&(`rE!+4oN&~Py{)i8C457l{7!H1d@TeV$ z1tgR%dXioa01Nv76D|a~l5OH&fDTxFPkyVX%-(L+M6g+0qUY2Y0QfryIph}{J0!`h z9#j`yzvcr1k1@4(jU65ZrxqBlz9Uv~0-rJrjPrW=?{N(Q4B*h5UsD+G#Eq25nQw<~ ziM(I$*P&=YHA#zsTDxStirEYS?J^wDP7ox3Af+VZm}D|+MI5nprdQxn0cTo&L# z=V_$)m9Lh`<(AT6|y46980xxwEeb2T76&Ooh(~d87rcG zL`r?9<3NK1&auAR1$nH9t01bfnQ42cK@@W47&*2uXESM#h_O$_f%*|K+pG;9Zif|- z)N_gYj(I^^5E;eHK$1PTXo3~dKBEf|g=`(*E`Rv89|sc59K>9GlE_jiQ*k~^Wnj6$ zi($A@S*oXCwUUHKD`?WrpTd~56~VH3%vP)ny3K63MDX%gdfdUFrHCB#rG6U&D0V~A zRI`mMNFI2Jt zm=jOAm+WRY1A|EyB1+%p;Mo^jr5+n)W(FcA)dp7j)Z47(Kse;Mml0caI#1Oy*NSv{ z+Jw;UvlOk!2?uc$+!_v#i1skJ`_nrY#a{>ab+3}ZsTcrhod}-1v zf}QFSb`D!bb~(Lug(INSk7ytv77WrhTh$bTywZZ0Ly@Z$kr~)39$)?C^ItA3n`eQ$ zSJUfO;{|ZHc?t@4@sxmGp^1DfW72?NEBs(1W|-) zxvRX&hXL_31`>Vr6wASIR5{a8iVqGTp;S=NF9@P-FV}}yINv$$2+D%Q7W(DH<>otY zoT+{?;JL)(jJ?Hb=g3TqQ&3%OgH26wX!-t^uMbbR(pyJdO)G2;pp>F1dGq1T@-97W zPoD)+Am9blinfqi(tAXUo&{xBgr?0+FQ>|P{t@A;D@^+Go$ubnT1%!fe3C>DV4q>MUR{p_qIe0)+y6e?z?A&?Sx)02mqdJ`uwuUuW~oT9{94goJ{!wD&aK-No{ zLj5L1A`?SMB%|sfZDsG4q6QTbr&YboTh=eG$&qe-z0xz+NRvnJgA4rN7sTJ07gx(+ z{GHHHr2Bfkw86fW&33Z!SCe7O@qvWX{7a-C>(ZF2KU>W^OuwB?VRc={VeG%($)N?& zo01D#3N>|1uPHeTMJzI)X_HXMnVS?uk7O5ciI<(#E~M#53&O@_v#qwav|MGi)z*>L zZ8KR=cs4Sx%71tB=h5+i7*i#PEGTS9nMX7pF$rdD$JY}*911#BpB^mR0}$4uI)LDQ zZ0xPZG1DW?fJMJ(mW(BW$tW~ukT+N83n^ScW47vRQc==@Cp~&bU8@E^YTUhe{OTiY ziI7UUge20!!KFpY7Urkal?r;Q^#Z(x`g;T~CUTIk^|S!mtQPDi`jSJ3RX!Cuz%`;a zl42d>+{X;Vb}3n#=Rn$7qT~G{f+nKBbnXF|a^~gI@7CVnZ8Sy98%7T9mhTb!J>usZ zMQThG(@}FXK3Bt9*dy*)4*iG_1W|p?^-kAqH?9$Fql~Yx+dAbcKysc|ONHI_dxdU% z61yU5CPPO5ovE!ma@3hi`6Tg$InT{djYW{6$vNX ztH+9^Y|Vi3ZiC)}Kx``71Go836`=$-jdn_`iWazMf#H0$8k9%W*O+%>uShew$eT?s zx$H2ej7o?*y>`9JP6FYl3TxG|9u8o)r1wx$ewgM38pkGwd#?bGY%zl9jZdqoYg(0^ z{J)XY?ccRP567XryrN@%!+VUk~>$YC}1ypI4+-&7DWEbDF%fq z8oU4pjJ@8uT@Qy5)Mg6}N4$#IBZ4ZS%Jgugc_Z`GDrP}(YUs6xBhjt78!)>!!drDq z3zG2ynZOqe%g&M<>EQ_HsMLH})l7cUj&!}LZNQ#9FsB@k{YijSnmtW8!2DM@X-5LJ zjhr1BI$K5{cPyH|g`~)Q1Yj#h1GJR61dW1N7En;;N>#~S^psbvLt>~65Ik1)n^+Ao zB&`WZ_@0;>21r5CYkq9CZZ>LR|H~3qUBr&aRw}ZPG3V}5$~lp+0vxakVtq+%$l&5r zg0vjE1NQw6mv*Ey#)`bs>(0T}dr?R6$|%Y(x^^Uxg0sIy>rZlfzz$xuJV(vJv`>zs~u z98H#qc1H3=GqndgptQmk6(dQkskfx(YR(gBEwuZp)cIF#T1J`OVF*kVyA_3rxiZ+P zTBW+A&79xT-^3&5i_)KNhjK`l2FWj)%1k`c6ne``FxbXj2F1ceQQ;X*rzuXlVyeXt zLAXe{!AM?H!||R5Sx`s)+g(tg#qZRlC%bb%_;Ps{DdL`N@nc1hyk>ZKQ^~$vi(fFU zXjdd-hBLOK5Z&h34!<=>o)IkTId}On&I0sU^|)kKRM>eJ%#)g(gJrdeh3lp@=oqu` zW$2_{SBSW(92SCPpBrc#Y4OWV=>UamTRI&3^g?OMp<@CdjRt5j_n)HD0ClhMTbNpF z8phR}r;pzxPKpuYQ<%o6ueKgYCTT$ethm%PA{7s*X9HE506c1au2WSEf8x}eAOk$D zDvZpSl2z@)pRX>pOdngnSW6Edk^0><(x9g=3hj@WqmCq~y^V-Msw_ydBGpZd{ICW? 
zev9wX9&hR{kjGs*G#CC19+rNey#$5c%~7yuU52Wt6mb-^RW#|!V?z*-qYUKolxnwf zXsmFORYCaesWx#dh^DI?;n!G~!Ea;L2)%P7BUv*=f;r|ym%wlBiYWW)xF8-D04YBd zaPHu@TF@jFBNrka%!-|XVpusf@Y`(qt>iT_Aok!}#Db_}hcQ$e{C3~M5wEWV&AeR^ zAeIcv4I#E;Lyo|2!xm&~`8#1jhL!gacYLUn1p$)#1T}L-4^zp!tRohLM{-%~NLmn* zP3>ST2<&!f_?kMpZSxaSR8jg)DvLs(y40bMjkORAmRJx51(+X$u29|9bDKqBNtv8^ zA6Ri?R90@@xf*R>AP%kEHMDTr>ratev%nGVxP+4ZvK4LR*D#xU&S~}&;{_H_kd+n$ z33g6D6%O%>y<}R>;F_xCLr6c>gAH_v~iq;5X(4I5y%J>%P;z>~CxTdAYN zeUOs5=%SBqMhF}sD^TM(TAwxi@d6QGPPBDX#jyUq6fscWmKPTtF7>R>C8eMJIB5h<< zhPc+ulE`%;2j3*~T>6g8!G*uP?7Jw=ys}nMe@o3L@X9)GtVQoE*L6{-lh#WfJ)PdJ z$BbjX?7ApCKh~~@P$O43@uzUuuJpZ`@GYzT9!KYq_X~r)c)0VkBKJV^o422b zv0WtAP74TSd39;r#!Hd-+S!S)+E~J@&1wi7gULy!Ph?k<^D|BrE*C~f$prL@j=vn= zbz2A)3l9*oaz?kZ65=8vY%JNj%+4{^A~&dGm`;$an@1#hd!I?i!*D^+A6w9#WIbZ$ z_0}P%XRbx+EUcqPK9su?F3GyHB52ssg4nu((;beZtsKU^E;&K4$(9I;X!dpIB>&-- z**2EOjbuIB-?5LiBYj|K!4wc; z#y~$PwIIeMr)@{!SqKYS$!%Cr-j66e z-LddPAG*uQViyCy2tN*R8V0N7JR%?(RXWbW<2)38L^usqe(EuxxLf4G_*f}(peUbg ze5@kpgNk;4_L3GOSSXK;cM{MmPQQUYVGk5Jp(=b>V<1Iwz_Npb#ns&!TaAP7QQ8Jl z4n)h;B?wQX$N`yEWvdsnY9Z`l-ZeE2NKWME`6Hakv(o3mC33^YM4`G$UgWsOpi^~_ zL3faz>^IYMYtNz4(HKS$RQyRn>{iK&V&0~eGq^dWVB|q)cfAhW7Q}d!^nySpE$0O> zJ)rZ>`^6)Q`sw?~LmS>umQ2n9pspPCZidw4lG9`#d92S!`1;aj>T%8jq)tl=17&It zb#&{BdM)(f7V6zx}|g*ol^k^S7ptXu8H&O1|1B-O_j+h!<)Z>8f}Rhj93u0 zne`x~^kbb#BaC}5`Vfd#1_VyHO)^S=X2`U{EyN!O;}5$aa`-MkYS4mQ*%+ac_*M|K zSYxO(y46|g(gA@%#cl+FQEPl+HqU}2^liw3cm<#^YESV8dK=LXWx zMgCFPo3L`gfGoLZRLGSZ40Nm@Y3&KWMO*IbOjnc@B$GeVj|dfgtpy1;1#ZpdXDj97 z3b+Cg1l;EdBN19Gna9DHO8;;q?Fet_CEzL+i3lMGwQOrLq`1MOxx)dl87RyH#chY( zkcqSE5}h0Rzd?1o+7}pe$xq2I45o_&^Q24d+b~BC}}|g-iZcNdQx<0OI>GQE248HM`~5t6%lC)UAK<>ORfR!jQV3iLN35zpwn&6lz$6B6zQ`- z#$@n!?4MTBWCY9RF_R&oM}^HWG`8q>1#KeEV4m(8ji?s! z>aKH}zxDSRmT2ygxn{f!UKg&WM7-^q!VJ0hyj3StUadgsb07bBskuESPRlJW>#E*uETak~{A z3Oy?gn?MkA!m|*+6EUt`5Ch)w$;%@NtJqn<3g%$O^xm8)cdTcDR3Cps>McSoA2J$a z6pmZpRZ<|Qr#vyk>Fe3XK$7??(u(%BPEC&`Rq_-Sa_Dgtu^vs{`z=hGj6l63Rv(xA z(nx`P%Py%2<4nz%rj0-Z9WPgAlS?9MAZ(b&yB=;sqxP{4bAQ(^G@tWtOfBcRn~(Gr zatYD|a|PZ|@?JD;*zZep!%q&Rw@Omx5e?KV?AgTpR$403DyWq8H6}>OX$7^oi>5EV zJR-PLD^h}(M$6Qa{UI<`V$EcO3PetZ`hx^K+au#YGXNHHkVSvBAE!8YSOmd7IlDM8 zpM)C@Zn_$a1O?M4_@*ofP8sx>cB|reSh}Il3!0ioO4up7W>qv%Y6G9NrmAsSki2hM%tbr0FG~Yw!2s3TqNc`aRZ+tCWydaQRt-|$P zel~(|m`0wYJ%c83*wX2F^3&i+a}bCiV&*`VE`Z9?3_*3GAx79BQG=AO>${24&8qS_ zesZOnZAC$tMC1y+_3J%Ltr!Xk8c0~I*C;Y_8<%FLUg>M9r<_ZDPtHq!nA)W7BEhxU zi1Kr|9>ZM(A*lw{8s2B<1+iSk2R?M(wH(q;)e^1>+Ri5oaOs;B7ev~rjueP(1VOh< zi6CE{o2~=BBC_F`Va~NDCmKP-)VUT!Hr!9Fde-jr5Lhip#@a@Fn5?0QBPojYPB2rhm$JOY+_ z_7+r}OEm$f)s_NoKjI95)(%g_mfp(TYd}PMrI(B?IL>}SI~7~BgcOYfeMOhIjM#AM z31U0i{tlqAwF=}1W*fD_M%8R^l7K8LmidEVtJWWx61_71qKh>D8-R=^mb|UAFtp`=ogYq@Kv} zL?{Z$Cc$1yOR9+^gy=>MT&hgQiNq2*oykd)%^X#Y;G~8-)Vc{9cxC0Dr6+r}YJO3G za*gq^CJRM7g}Owut@(KwUmpMo4>j$z%K2fZSiZF4BA!yO*Kae27Ggm-lgJaGm# zYtKS|8=anbo|?f!c~{=Po(5F)A3xtT|((%z_|->beWJbx&=D zhKzdRm#?wsw^~6$vg{o|-#^Bo&>sIa)K*pyy&Y|;PE46N*C4&8whEpFJaJ}A<# z_=o$mdsuV47$!{b6XvKff`o~^$|S+)iaC&*`Xp>GHq`9Z_M$eE!ELP@-tVUfI4VU~ z1S!AYtdg`B!GI%Hxe$vn%D4=p<}028)aS$!TdM=DYaDB=fo*@FqQfO2nX>aTqyWWK zjfQVRX|2rx#R00acNiLLYg8=3rPi1z)zZODwAGt{Lm8wRQY$+ew)70HQ-4{O{HXvJJV-610_#}&+1^BZTG)C}y-5G&O^3G6e_h3HJcIH}_NuMuqX1DvPF-+!S{;6P>0uc9GwL zbOgZcdWkBx`gyRr1|p>}&xAJZ|Be93CSsqe<>7lwbyqNaDFv z&?P66dSHtPLJ8&yd4)v=S2tOep-%>Kh@pH2qsuYkXII36NN!g#be7?uSP{Zkj}cdo z2siVh8`aZ+Bv@0AXd5G*+&bDWazIWwtc8cpISUvN6AI`R!FZghW*jSrU&c^c4u@MB zQBz~YGh3I=BAG*1k94BOD_d%eI63Ppw$9%~Hv=S$Re*5GP}$$Yity{$ zBl9&G8ZMVGpJfVgdyRi%U&5qlPB|O2Q|9d=eRuNh5;?H$^-J7WQ`O(&lSkPK_chjn 
[GIT binary patch payload (base85-encoded) for the qlearning-results/* files omitted — not human-readable.]