diff --git a/qlearning-results/a0.1-g0.1-e0.1-approximateqlearning b/qlearning-results/a0.1-g0.1-e0.1-approximateqlearning new file mode 100644 index 0000000..8098db4 --- /dev/null +++ b/qlearning-results/a0.1-g0.1-e0.1-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 19:53:51,825 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 0.1, exploration_rate = 0.1 +202.9900000000003 +203.45950000000053 +203.8405000000007 +204.57150000000019 +204.51700000000048 +205.23050000000032 +204.14600000000027 +202.9190000000006 +205.70500000000007 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:54:14,098 INFO [tetris] Final score: 206 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:54:14,306 INFO [tetris] Final score: 195 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:54:14,529 INFO [tetris] Final score: 208 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:54:14,705 INFO [tetris] Final score: 202 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:54:14,898 INFO [tetris] Final score: 210 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:54:15,090 INFO [tetris] Final score: 204 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:54:15,329 INFO [tetris] Final score: 221 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:54:15,537 INFO [tetris] Final score: 216 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 19:54:15,762 INFO [tetris] Final score: 200 +Lost due to: LockOut +2020-04-20 19:54:15,969 INFO [tetris] Final score: 212 diff --git a/qlearning-results/a0.1-g0.1-e0.1-qlearning b/qlearning-results/a0.1-g0.1-e0.1-qlearning new file mode 100644 index 0000000..c313de8 --- /dev/null +++ b/qlearning-results/a0.1-g0.1-e0.1-qlearning @@ -0,0 +1,30 @@ +2020-04-20 22:47:13,417 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 0.1, exploration_rate = 0.1 +220.95810000000168 
+218.19660000000184 +216.98390000000128 +216.32795000000155 +216.2846500000026 +215.670950000002 +215.56090000000168 +215.49235000000138 +214.88230000000195 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:55:39,974 INFO [tetris] Final score: 203 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:55:40,517 INFO [tetris] Final score: 182 +Lost due to: LockOut +2020-04-20 22:55:40,757 INFO [tetris] Final score: 192 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:55:42,069 INFO [tetris] Final score: 218 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 22:55:42,438 INFO [tetris] Final score: 200 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:55:43,814 INFO [tetris] Final score: 228 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 22:55:45,093 INFO [tetris] Final score: 214 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 22:55:46,614 INFO [tetris] Final score: 232 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:55:47,941 INFO [tetris] Final score: 168 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:55:49,078 INFO [tetris] Final score: 200 diff --git a/qlearning-results/a0.1-g0.1-e0.25-approximateqlearning b/qlearning-results/a0.1-g0.1-e0.25-approximateqlearning new file mode 100644 index 0000000..8fcfb38 --- /dev/null +++ b/qlearning-results/a0.1-g0.1-e0.25-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 19:54:16,016 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 0.1, exploration_rate = 0.25 +199.9395000000003 +197.7290000000002 +198.4160000000001 +198.15649999999977 +198.8634999999999 +197.55950000000018 +197.8339999999998 +197.34150000000017 +197.9950000000006 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:54:39,280 INFO [tetris] Final score: 188 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:54:39,584 INFO [tetris] Final score: 193 +Lost due to: 
BlockOut(Position { x: 5, y: 20 }) +2020-04-20 19:54:40,432 INFO [tetris] Final score: 189 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:54:40,640 INFO [tetris] Final score: 158 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:54:41,008 INFO [tetris] Final score: 182 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 19:54:41,216 INFO [tetris] Final score: 217 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 19:54:41,568 INFO [tetris] Final score: 178 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:54:41,840 INFO [tetris] Final score: 172 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:54:42,112 INFO [tetris] Final score: 200 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:54:42,304 INFO [tetris] Final score: 208 diff --git a/qlearning-results/a0.1-g0.1-e0.25-qlearning b/qlearning-results/a0.1-g0.1-e0.25-qlearning new file mode 100644 index 0000000..b9ef54f --- /dev/null +++ b/qlearning-results/a0.1-g0.1-e0.25-qlearning @@ -0,0 +1,30 @@ +2020-04-20 22:56:02,392 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 0.1, exploration_rate = 0.25 +222.2459000000013 +219.66315000000242 +218.52945000000193 +217.64080000000234 +217.4608 +217.40135000000208 +217.14465000000126 +217.1419500000022 +216.6491000000021 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 23:05:07,085 INFO [tetris] Final score: 233 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 23:05:07,565 INFO [tetris] Final score: 220 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 23:05:08,444 INFO [tetris] Final score: 205 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 23:05:08,732 INFO [tetris] Final score: 213 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 23:05:09,453 INFO [tetris] Final score: 206 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 23:05:10,236 INFO [tetris] Final score: 187 
+Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 23:05:10,989 INFO [tetris] Final score: 237 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 23:05:12,061 INFO [tetris] Final score: 267 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 23:05:12,957 INFO [tetris] Final score: 203 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 23:05:14,397 INFO [tetris] Final score: 218 diff --git a/qlearning-results/a0.1-g0.1-e0.5-approximateqlearning b/qlearning-results/a0.1-g0.1-e0.5-approximateqlearning new file mode 100644 index 0000000..4481ea7 --- /dev/null +++ b/qlearning-results/a0.1-g0.1-e0.5-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 19:54:42,317 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 0.1, exploration_rate = 0.5 +196.97650000000007 +193.87299999999973 +192.38500000000002 +192.71700000000013 +193.4105000000001 +192.64049999999997 +192.89299999999992 +193.16400000000004 +193.17149999999995 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 19:55:07,820 INFO [tetris] Final score: 202 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:55:08,397 INFO [tetris] Final score: 231 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:55:08,764 INFO [tetris] Final score: 142 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 19:55:09,149 INFO [tetris] Final score: 168 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:55:10,188 INFO [tetris] Final score: 231 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:55:10,557 INFO [tetris] Final score: 183 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:55:10,956 INFO [tetris] Final score: 168 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:55:11,356 INFO [tetris] Final score: 186 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 19:55:11,773 INFO [tetris] Final score: 234 +Lost due to: BlockOut(Position { x: 4, y: 20 }) 
+2020-04-20 19:55:12,044 INFO [tetris] Final score: 166 diff --git a/qlearning-results/a0.1-g0.1-e0.5-qlearning b/qlearning-results/a0.1-g0.1-e0.5-qlearning new file mode 100644 index 0000000..9a7d92b --- /dev/null +++ b/qlearning-results/a0.1-g0.1-e0.5-qlearning @@ -0,0 +1,30 @@ +2020-04-20 23:05:31,376 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 0.1, exploration_rate = 0.5 +221.98790000000076 +220.2209500000016 +219.2323000000003 +218.9858000000025 +218.80650000000216 +218.30300000000062 +218.24995000000052 +217.698150000002 +217.9902500000012 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 23:14:19,306 INFO [tetris] Final score: 196 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 23:14:19,977 INFO [tetris] Final score: 200 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 23:14:20,889 INFO [tetris] Final score: 223 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 23:14:21,529 INFO [tetris] Final score: 187 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 23:14:22,538 INFO [tetris] Final score: 225 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 23:14:23,673 INFO [tetris] Final score: 210 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 23:14:24,457 INFO [tetris] Final score: 177 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 23:14:25,113 INFO [tetris] Final score: 175 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 23:14:25,786 INFO [tetris] Final score: 175 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 23:14:26,777 INFO [tetris] Final score: 178 diff --git a/qlearning-results/a0.1-g0.1-e0.75-approximateqlearning b/qlearning-results/a0.1-g0.1-e0.75-approximateqlearning new file mode 100644 index 0000000..7366047 --- /dev/null +++ b/qlearning-results/a0.1-g0.1-e0.75-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 19:55:12,057 INFO [tetris::actors::qlearning] Training an actor with 
learning_rate = 0.1, discount_rate = 0.1, exploration_rate = 0.75 +199.317 +198.07700000000025 +198.324 +198.93750000000006 +199.0355000000004 +199.3629999999999 +199.24750000000017 +198.76299999999966 +198.59600000000023 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 19:55:44,123 INFO [tetris] Final score: 171 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 19:55:44,636 INFO [tetris] Final score: 118 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 19:55:45,771 INFO [tetris] Final score: 167 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:55:46,795 INFO [tetris] Final score: 245 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:55:47,532 INFO [tetris] Final score: 212 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:55:48,252 INFO [tetris] Final score: 183 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:55:48,892 INFO [tetris] Final score: 214 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:55:49,291 INFO [tetris] Final score: 169 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 19:55:50,076 INFO [tetris] Final score: 192 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:55:50,860 INFO [tetris] Final score: 212 diff --git a/qlearning-results/a0.1-g0.1-e0.75-qlearning b/qlearning-results/a0.1-g0.1-e0.75-qlearning new file mode 100644 index 0000000..fbd2d45 --- /dev/null +++ b/qlearning-results/a0.1-g0.1-e0.75-qlearning @@ -0,0 +1,30 @@ +2020-04-20 23:20:30,784 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 0.1, exploration_rate = 0.75 +222.8502999999997 +221.56735000000256 +220.65415000000044 +219.97580000000033 +220.14905000000118 +219.751050000002 +219.8127000000007 +219.5739499999999 +219.68790000000163 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 23:27:54,362 INFO [tetris] Final score: 218 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 
23:27:55,294 INFO [tetris] Final score: 171 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 23:27:56,621 INFO [tetris] Final score: 216 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-20 23:27:57,326 INFO [tetris] Final score: 160 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 23:27:58,494 INFO [tetris] Final score: 208 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 23:27:59,534 INFO [tetris] Final score: 171 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 23:28:00,414 INFO [tetris] Final score: 193 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 23:28:01,518 INFO [tetris] Final score: 257 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 23:28:03,085 INFO [tetris] Final score: 225 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 23:28:04,637 INFO [tetris] Final score: 236 diff --git a/qlearning-results/a0.1-g0.1-e0.9-approximateqlearning b/qlearning-results/a0.1-g0.1-e0.9-approximateqlearning new file mode 100644 index 0000000..a17230c --- /dev/null +++ b/qlearning-results/a0.1-g0.1-e0.9-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 19:55:50,868 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 0.1, exploration_rate = 0.9 +206.9339999999998 +208.44249999999934 +207.38549999999978 +207.54550000000023 +208.83049999999997 +207.53250000000017 +208.2755000000005 +209.32850000000042 +209.90800000000064 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 19:56:31,530 INFO [tetris] Final score: 199 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:56:32,745 INFO [tetris] Final score: 194 +Lost due to: LockOut +2020-04-20 19:56:33,993 INFO [tetris] Final score: 187 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:56:35,017 INFO [tetris] Final score: 190 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:56:35,689 INFO [tetris] Final score: 168 +Lost due to: LockOut +2020-04-20 
19:56:37,497 INFO [tetris] Final score: 244 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:56:38,490 INFO [tetris] Final score: 183 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:56:39,177 INFO [tetris] Final score: 170 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:56:40,073 INFO [tetris] Final score: 217 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:56:40,698 INFO [tetris] Final score: 224 diff --git a/qlearning-results/a0.1-g0.1-e0.9-qlearning b/qlearning-results/a0.1-g0.1-e0.9-qlearning new file mode 100644 index 0000000..1edab43 --- /dev/null +++ b/qlearning-results/a0.1-g0.1-e0.9-qlearning @@ -0,0 +1,30 @@ +2020-04-20 23:28:22,227 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 0.1, exploration_rate = 0.9 +223.1952000000007 +222.9115500000005 +222.5591000000004 +222.30110000000096 +221.7994500000013 +222.20040000000213 +221.98400000000007 +221.9154000000008 +222.00440000000148 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 23:35:25,482 INFO [tetris] Final score: 172 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 23:35:26,966 INFO [tetris] Final score: 181 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 23:35:27,911 INFO [tetris] Final score: 154 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 23:35:29,047 INFO [tetris] Final score: 216 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 23:35:30,151 INFO [tetris] Final score: 206 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 23:35:32,390 INFO [tetris] Final score: 286 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 23:35:33,254 INFO [tetris] Final score: 215 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 23:35:36,455 INFO [tetris] Final score: 388 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 23:35:37,463 INFO [tetris] Final score: 208 +Lost due to: BlockOut(Position { x: 4, y: 
20 }) +2020-04-20 23:35:39,511 INFO [tetris] Final score: 282 diff --git a/qlearning-results/a0.1-g0.5-e0.1-approximateqlearning b/qlearning-results/a0.1-g0.5-e0.1-approximateqlearning new file mode 100644 index 0000000..043bf78 --- /dev/null +++ b/qlearning-results/a0.1-g0.5-e0.1-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 19:51:03,272 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 0.5, exploration_rate = 0.1 +202.91550000000058 +201.90000000000043 +204.79149999999984 +203.40600000000023 +204.6635 +202.65850000000034 +204.23150000000106 +207.42250000000072 +203.14750000000012 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:51:24,270 INFO [tetris] Final score: 192 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 19:51:24,494 INFO [tetris] Final score: 232 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:51:24,686 INFO [tetris] Final score: 220 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 19:51:24,862 INFO [tetris] Final score: 210 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 19:51:25,039 INFO [tetris] Final score: 210 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:51:25,231 INFO [tetris] Final score: 218 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 19:51:25,423 INFO [tetris] Final score: 197 +Lost due to: LockOut +2020-04-20 19:51:25,615 INFO [tetris] Final score: 210 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:51:25,823 INFO [tetris] Final score: 220 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:51:26,015 INFO [tetris] Final score: 210 diff --git a/qlearning-results/a0.1-g0.5-e0.1-qlearning b/qlearning-results/a0.1-g0.5-e0.1-qlearning new file mode 100644 index 0000000..d7227bb --- /dev/null +++ b/qlearning-results/a0.1-g0.5-e0.1-qlearning @@ -0,0 +1,30 @@ +2020-04-20 22:03:08,902 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate 
= 0.5, exploration_rate = 0.1 +218.95680000000175 +216.40600000000256 +215.8475000000029 +216.2365500000022 +215.53205000000244 +215.29070000000203 +215.55760000000194 +215.17585000000147 +215.32435000000112 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:11:39,795 INFO [tetris] Final score: 242 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:11:41,267 INFO [tetris] Final score: 244 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:11:41,747 INFO [tetris] Final score: 196 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:11:42,706 INFO [tetris] Final score: 195 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 22:11:43,955 INFO [tetris] Final score: 200 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 22:11:44,387 INFO [tetris] Final score: 207 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 22:11:44,803 INFO [tetris] Final score: 204 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:11:45,410 INFO [tetris] Final score: 194 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:11:45,811 INFO [tetris] Final score: 200 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:11:46,515 INFO [tetris] Final score: 206 diff --git a/qlearning-results/a0.1-g0.5-e0.25-approximateqlearning b/qlearning-results/a0.1-g0.5-e0.25-approximateqlearning new file mode 100644 index 0000000..8220391 --- /dev/null +++ b/qlearning-results/a0.1-g0.5-e0.25-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 19:51:26,021 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 0.5, exploration_rate = 0.25 +200.25600000000043 +199.62100000000024 +196.92050000000043 +199.74200000000064 +199.07450000000014 +199.32550000000037 +199.4075000000002 +197.92750000000015 +197.7249999999997 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:51:48,554 INFO [tetris] Final score: 171 +Lost due to: BlockOut(Position { x: 4, y: 20 }) 
+2020-04-20 19:51:49,834 INFO [tetris] Final score: 248 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 19:51:50,986 INFO [tetris] Final score: 212 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:51:52,058 INFO [tetris] Final score: 136 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 19:51:52,474 INFO [tetris] Final score: 197 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 19:51:53,802 INFO [tetris] Final score: 216 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:51:55,722 INFO [tetris] Final score: 239 +Lost due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-20 19:51:56,202 INFO [tetris] Final score: 168 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:51:56,809 INFO [tetris] Final score: 198 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 19:51:57,801 INFO [tetris] Final score: 182 diff --git a/qlearning-results/a0.1-g0.5-e0.25-qlearning b/qlearning-results/a0.1-g0.5-e0.25-qlearning new file mode 100644 index 0000000..c0bee5d --- /dev/null +++ b/qlearning-results/a0.1-g0.5-e0.25-qlearning @@ -0,0 +1,30 @@ +2020-04-20 22:11:59,702 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 0.5, exploration_rate = 0.25 +221.37790000000103 +219.74895000000214 +218.8575500000007 +218.10520000000105 +217.6736000000012 +217.39030000000125 +217.24575000000306 +217.0567500000027 +216.70315000000298 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 22:21:03,153 INFO [tetris] Final score: 185 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:21:04,064 INFO [tetris] Final score: 219 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:21:04,993 INFO [tetris] Final score: 217 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:21:06,465 INFO [tetris] Final score: 263 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:21:07,361 INFO [tetris] Final score: 228 +Lost due to: 
BlockOut(Position { x: 5, y: 20 }) +2020-04-20 22:21:07,937 INFO [tetris] Final score: 210 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:21:09,217 INFO [tetris] Final score: 219 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:21:10,177 INFO [tetris] Final score: 226 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-20 22:21:11,281 INFO [tetris] Final score: 193 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 22:21:12,033 INFO [tetris] Final score: 224 diff --git a/qlearning-results/a0.1-g0.5-e0.5-approximateqlearning b/qlearning-results/a0.1-g0.5-e0.5-approximateqlearning new file mode 100644 index 0000000..39306e2 --- /dev/null +++ b/qlearning-results/a0.1-g0.5-e0.5-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 19:51:57,814 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 0.5, exploration_rate = 0.5 +197.60850000000073 +194.93899999999988 +192.84999999999997 +194.20399999999952 +193.8585 +193.10500000000013 +192.72149999999954 +193.21349999999984 +193.71550000000025 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:52:22,269 INFO [tetris] Final score: 226 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:52:22,605 INFO [tetris] Final score: 234 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:52:22,814 INFO [tetris] Final score: 144 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:52:23,341 INFO [tetris] Final score: 151 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:52:23,757 INFO [tetris] Final score: 139 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:52:24,125 INFO [tetris] Final score: 202 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:52:24,782 INFO [tetris] Final score: 218 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:52:25,102 INFO [tetris] Final score: 213 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 19:52:25,518 
INFO [tetris] Final score: 210 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:52:26,333 INFO [tetris] Final score: 230 diff --git a/qlearning-results/a0.1-g0.5-e0.5-qlearning b/qlearning-results/a0.1-g0.5-e0.5-qlearning new file mode 100644 index 0000000..1d71e64 --- /dev/null +++ b/qlearning-results/a0.1-g0.5-e0.5-qlearning @@ -0,0 +1,30 @@ +2020-04-20 22:21:26,605 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 0.5, exploration_rate = 0.5 +222.22215000000207 +220.20200000000062 +220.06675000000146 +219.06850000000324 +218.8581500000011 +218.28170000000168 +218.65165000000067 +218.28869999999947 +217.98975000000075 +Lost due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-20 22:30:10,316 INFO [tetris] Final score: 243 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:30:11,339 INFO [tetris] Final score: 202 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:30:12,492 INFO [tetris] Final score: 183 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:30:13,420 INFO [tetris] Final score: 194 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:30:15,260 INFO [tetris] Final score: 253 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:30:16,060 INFO [tetris] Final score: 203 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:30:16,508 INFO [tetris] Final score: 227 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-20 22:30:19,084 INFO [tetris] Final score: 321 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:30:20,124 INFO [tetris] Final score: 223 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:30:20,956 INFO [tetris] Final score: 207 diff --git a/qlearning-results/a0.1-g0.5-e0.75-approximateqlearning b/qlearning-results/a0.1-g0.5-e0.75-approximateqlearning new file mode 100644 index 0000000..6ce2f3e --- /dev/null +++ b/qlearning-results/a0.1-g0.5-e0.75-approximateqlearning @@ -0,0 +1,30 @@ 
+2020-04-20 19:52:26,341 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 0.5, exploration_rate = 0.75 +199.8565000000001 +198.30050000000028 +198.6645000000003 +199.38399999999982 +199.6905000000004 +198.73249999999976 +198.10299999999992 +198.96399999999988 +199.12200000000013 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:52:57,169 INFO [tetris] Final score: 189 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 19:52:57,856 INFO [tetris] Final score: 169 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 19:52:58,656 INFO [tetris] Final score: 168 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 19:52:59,025 INFO [tetris] Final score: 205 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:52:59,488 INFO [tetris] Final score: 166 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:52:59,840 INFO [tetris] Final score: 164 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 19:53:01,024 INFO [tetris] Final score: 195 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-20 19:53:01,617 INFO [tetris] Final score: 197 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 19:53:02,049 INFO [tetris] Final score: 177 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 19:53:02,593 INFO [tetris] Final score: 180 diff --git a/qlearning-results/a0.1-g0.5-e0.75-qlearning b/qlearning-results/a0.1-g0.5-e0.75-qlearning new file mode 100644 index 0000000..19ae04c --- /dev/null +++ b/qlearning-results/a0.1-g0.5-e0.75-qlearning @@ -0,0 +1,30 @@ +2020-04-20 22:30:37,639 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 0.5, exploration_rate = 0.75 +222.70830000000194 +221.62445000000056 +220.67714999999978 +220.32700000000204 +220.72685000000172 +220.04535000000004 +219.66020000000054 +219.8325000000021 +219.15890000000252 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 
22:38:38,738 INFO [tetris] Final score: 221 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 22:38:40,721 INFO [tetris] Final score: 293 +Lost due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-20 22:38:42,370 INFO [tetris] Final score: 239 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 22:38:43,922 INFO [tetris] Final score: 263 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 22:38:46,322 INFO [tetris] Final score: 239 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:38:47,393 INFO [tetris] Final score: 215 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:38:48,002 INFO [tetris] Final score: 184 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:38:49,170 INFO [tetris] Final score: 163 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 22:38:50,594 INFO [tetris] Final score: 221 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 22:38:51,346 INFO [tetris] Final score: 225 diff --git a/qlearning-results/a0.1-g0.5-e0.9-approximateqlearning b/qlearning-results/a0.1-g0.5-e0.9-approximateqlearning new file mode 100644 index 0000000..2669e78 --- /dev/null +++ b/qlearning-results/a0.1-g0.5-e0.9-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 19:53:02,605 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 0.5, exploration_rate = 0.9 +207.45600000000007 +207.617 +207.4425000000002 +208.28600000000034 +210.19349999999963 +208.85100000000028 +207.92550000000023 +209.53050000000053 +207.58600000000015 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:53:43,157 INFO [tetris] Final score: 181 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 19:53:43,925 INFO [tetris] Final score: 201 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 19:53:45,140 INFO [tetris] Final score: 168 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 19:53:45,797 INFO [tetris] Final score: 186 +Lost due to: 
BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:53:46,756 INFO [tetris] Final score: 177 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:53:48,197 INFO [tetris] Final score: 236 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 19:53:49,413 INFO [tetris] Final score: 212 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:53:50,101 INFO [tetris] Final score: 180 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 19:53:50,852 INFO [tetris] Final score: 187 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:53:51,813 INFO [tetris] Final score: 179 diff --git a/qlearning-results/a0.1-g0.5-e0.9-qlearning b/qlearning-results/a0.1-g0.5-e0.9-qlearning new file mode 100644 index 0000000..a55862d --- /dev/null +++ b/qlearning-results/a0.1-g0.5-e0.9-qlearning @@ -0,0 +1,30 @@ +2020-04-20 22:39:09,115 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 0.5, exploration_rate = 0.9 +223.29540000000083 +222.6900000000015 +222.5335000000025 +221.9808500000014 +221.26825000000136 +221.8926500000009 +222.22910000000022 +221.34840000000182 +221.69575000000344 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:46:43,359 INFO [tetris] Final score: 177 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:46:44,127 INFO [tetris] Final score: 145 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:46:45,151 INFO [tetris] Final score: 278 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:46:47,232 INFO [tetris] Final score: 210 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:46:48,767 INFO [tetris] Final score: 242 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 22:46:50,736 INFO [tetris] Final score: 245 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 22:46:51,455 INFO [tetris] Final score: 187 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 22:46:52,847 INFO [tetris] Final score: 
225 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:46:54,031 INFO [tetris] Final score: 246 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:46:55,039 INFO [tetris] Final score: 197 diff --git a/qlearning-results/a0.1-g0.9-e0.1-approximateqlearning b/qlearning-results/a0.1-g0.9-e0.1-approximateqlearning new file mode 100644 index 0000000..4b16379 --- /dev/null +++ b/qlearning-results/a0.1-g0.9-e0.1-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 19:48:51,558 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 0.9, exploration_rate = 0.1 +202.6465000000004 +202.39400000000015 +202.71350000000058 +203.02800000000005 +203.03899999999987 +203.0555000000006 +202.93950000000052 +202.5405000000001 +202.92950000000036 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:49:04,774 INFO [tetris] Final score: 214 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:49:04,950 INFO [tetris] Final score: 206 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:49:05,191 INFO [tetris] Final score: 214 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:49:05,398 INFO [tetris] Final score: 208 +Lost due to: LockOut +2020-04-20 19:49:05,590 INFO [tetris] Final score: 188 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:49:05,767 INFO [tetris] Final score: 200 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:49:05,942 INFO [tetris] Final score: 202 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:49:06,118 INFO [tetris] Final score: 200 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:49:06,310 INFO [tetris] Final score: 208 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:49:06,503 INFO [tetris] Final score: 200 diff --git a/qlearning-results/a0.1-g0.9-e0.1-qlearning b/qlearning-results/a0.1-g0.9-e0.1-qlearning new file mode 100644 index 0000000..70cf2bc --- /dev/null +++ 
b/qlearning-results/a0.1-g0.9-e0.1-qlearning @@ -0,0 +1,30 @@ +2020-04-20 20:55:09,252 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 0.9, exploration_rate = 0.1 +217.85775000000078 +214.23900000000074 +213.86090000000135 +212.71030000000212 +212.2356500000019 +211.60060000000047 +211.6340000000002 +211.5668500000007 +211.38930000000192 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 21:03:51,834 INFO [tetris] Final score: 174 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:03:52,618 INFO [tetris] Final score: 191 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 21:03:54,201 INFO [tetris] Final score: 237 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 21:03:55,625 INFO [tetris] Final score: 225 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 21:03:56,745 INFO [tetris] Final score: 220 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 21:03:58,217 INFO [tetris] Final score: 232 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:03:58,825 INFO [tetris] Final score: 174 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:03:59,337 INFO [tetris] Final score: 193 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:03:59,817 INFO [tetris] Final score: 173 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 21:04:00,506 INFO [tetris] Final score: 190 diff --git a/qlearning-results/a0.1-g0.9-e0.25-approximateqlearning b/qlearning-results/a0.1-g0.9-e0.25-approximateqlearning new file mode 100644 index 0000000..3b9d3d1 --- /dev/null +++ b/qlearning-results/a0.1-g0.9-e0.25-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 19:49:06,512 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 0.9, exploration_rate = 0.25 +200.03150000000065 +200.79300000000003 +200.47100000000017 +200.0220000000006 +199.55649999999997 +200.4180000000004 +201.05850000000044 
+200.04650000000038 +200.06300000000036 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 19:49:20,634 INFO [tetris] Final score: 216 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-20 19:49:20,890 INFO [tetris] Final score: 194 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:49:21,130 INFO [tetris] Final score: 210 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:49:21,354 INFO [tetris] Final score: 206 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:49:21,594 INFO [tetris] Final score: 234 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:49:21,834 INFO [tetris] Final score: 240 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:49:22,042 INFO [tetris] Final score: 202 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 19:49:22,267 INFO [tetris] Final score: 196 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:49:22,490 INFO [tetris] Final score: 211 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:49:22,746 INFO [tetris] Final score: 187 diff --git a/qlearning-results/a0.1-g0.9-e0.25-qlearning b/qlearning-results/a0.1-g0.9-e0.25-qlearning new file mode 100644 index 0000000..6b3fdbf --- /dev/null +++ b/qlearning-results/a0.1-g0.9-e0.25-qlearning @@ -0,0 +1,30 @@ +2020-04-20 21:04:13,262 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 0.9, exploration_rate = 0.25 +222.82825000000219 +220.54200000000077 +219.28270000000148 +218.53440000000182 +218.82440000000096 +218.7121500000015 +217.93305000000066 +217.89815000000158 +217.86430000000198 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-20 21:13:24,753 INFO [tetris] Final score: 204 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 21:13:26,065 INFO [tetris] Final score: 198 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:13:26,720 INFO [tetris] Final score: 211 +Lost due to: BlockOut(Position { x: 4, 
y: 20 }) +2020-04-20 21:13:28,032 INFO [tetris] Final score: 227 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 21:13:28,432 INFO [tetris] Final score: 190 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:13:29,712 INFO [tetris] Final score: 224 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 21:13:30,880 INFO [tetris] Final score: 230 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:13:31,744 INFO [tetris] Final score: 247 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:13:33,776 INFO [tetris] Final score: 284 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:13:35,280 INFO [tetris] Final score: 227 diff --git a/qlearning-results/a0.1-g0.9-e0.5-approximateqlearning b/qlearning-results/a0.1-g0.9-e0.5-approximateqlearning new file mode 100644 index 0000000..dab391a --- /dev/null +++ b/qlearning-results/a0.1-g0.9-e0.5-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 19:49:22,757 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 0.9, exploration_rate = 0.5 +197.83150000000063 +197.5155 +196.53200000000007 +197.7870000000006 +197.4235000000002 +198.2155000000005 +198.35950000000005 +197.71749999999992 +197.63600000000034 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:49:39,931 INFO [tetris] Final score: 217 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:49:40,251 INFO [tetris] Final score: 162 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 19:49:40,603 INFO [tetris] Final score: 193 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:49:40,907 INFO [tetris] Final score: 194 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:49:41,179 INFO [tetris] Final score: 190 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 19:49:41,435 INFO [tetris] Final score: 178 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:49:41,755 INFO [tetris] Final score: 
189 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:49:42,011 INFO [tetris] Final score: 195 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:49:42,427 INFO [tetris] Final score: 218 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:49:43,307 INFO [tetris] Final score: 297 diff --git a/qlearning-results/a0.1-g0.9-e0.5-qlearning b/qlearning-results/a0.1-g0.9-e0.5-qlearning new file mode 100644 index 0000000..cdc8056 --- /dev/null +++ b/qlearning-results/a0.1-g0.9-e0.5-qlearning @@ -0,0 +1,30 @@ +2020-04-20 21:13:52,160 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 0.9, exploration_rate = 0.5 +222.20530000000127 +221.69330000000247 +220.42010000000138 +220.16220000000072 +219.9176000000008 +219.27640000000164 +219.26185000000027 +219.42875000000203 +219.08700000000275 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:22:36,915 INFO [tetris] Final score: 187 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:22:38,339 INFO [tetris] Final score: 284 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:22:38,690 INFO [tetris] Final score: 192 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 21:22:39,362 INFO [tetris] Final score: 233 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:22:41,331 INFO [tetris] Final score: 278 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:22:42,179 INFO [tetris] Final score: 208 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 21:22:43,074 INFO [tetris] Final score: 217 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:22:45,251 INFO [tetris] Final score: 246 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:22:46,883 INFO [tetris] Final score: 238 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 21:22:47,715 INFO [tetris] Final score: 233 diff --git a/qlearning-results/a0.1-g0.9-e0.75-approximateqlearning 
b/qlearning-results/a0.1-g0.9-e0.75-approximateqlearning new file mode 100644 index 0000000..81a135c --- /dev/null +++ b/qlearning-results/a0.1-g0.9-e0.75-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 19:49:43,324 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 0.9, exploration_rate = 0.75 +200.32500000000013 +200.8425000000005 +200.43950000000055 +200.54100000000017 +200.49400000000034 +200.66400000000002 +200.5815000000002 +199.424 +200.4714999999997 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:50:09,866 INFO [tetris] Final score: 181 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:50:10,250 INFO [tetris] Final score: 152 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:50:11,114 INFO [tetris] Final score: 243 +Lost due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-20 19:50:12,122 INFO [tetris] Final score: 223 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 19:50:12,730 INFO [tetris] Final score: 176 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:50:13,482 INFO [tetris] Final score: 193 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:50:14,586 INFO [tetris] Final score: 214 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:50:15,610 INFO [tetris] Final score: 198 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 19:50:16,554 INFO [tetris] Final score: 190 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:50:17,419 INFO [tetris] Final score: 247 diff --git a/qlearning-results/a0.1-g0.9-e0.75-qlearning b/qlearning-results/a0.1-g0.9-e0.75-qlearning new file mode 100644 index 0000000..620c422 --- /dev/null +++ b/qlearning-results/a0.1-g0.9-e0.75-qlearning @@ -0,0 +1,30 @@ +2020-04-20 21:23:03,999 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 0.9, exploration_rate = 0.75 +223.42030000000008 +222.7879500000012 +221.70360000000295 
+221.8445500000001 +221.79939999999996 +221.50765000000143 +220.82935000000086 +221.18140000000034 +221.43530000000243 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:31:09,656 INFO [tetris] Final score: 203 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:31:11,464 INFO [tetris] Final score: 281 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:31:12,808 INFO [tetris] Final score: 283 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 21:31:13,943 INFO [tetris] Final score: 216 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:31:14,920 INFO [tetris] Final score: 249 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 21:31:16,248 INFO [tetris] Final score: 156 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:31:18,392 INFO [tetris] Final score: 209 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:31:19,592 INFO [tetris] Final score: 214 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:31:20,391 INFO [tetris] Final score: 203 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:31:21,335 INFO [tetris] Final score: 225 diff --git a/qlearning-results/a0.1-g0.9-e0.9-approximateqlearning b/qlearning-results/a0.1-g0.9-e0.9-approximateqlearning new file mode 100644 index 0000000..4cfdc8e --- /dev/null +++ b/qlearning-results/a0.1-g0.9-e0.9-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 19:50:17,433 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 0.9, exploration_rate = 0.9 +208.73600000000016 +209.4225000000004 +209.70750000000032 +207.65349999999984 +207.76400000000055 +208.6115000000001 +207.88149999999996 +207.8700000000006 +209.03950000000006 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:50:54,363 INFO [tetris] Final score: 171 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-20 19:50:55,243 INFO [tetris] Final score: 167 +Lost due to: BlockOut(Position { x: 3, 
y: 20 }) +2020-04-20 19:50:56,059 INFO [tetris] Final score: 236 +Lost due to: LockOut +2020-04-20 19:50:57,324 INFO [tetris] Final score: 206 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 19:50:58,987 INFO [tetris] Final score: 251 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:50:59,980 INFO [tetris] Final score: 215 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:51:00,763 INFO [tetris] Final score: 168 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:51:01,403 INFO [tetris] Final score: 169 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:51:02,603 INFO [tetris] Final score: 236 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 19:51:03,259 INFO [tetris] Final score: 236 diff --git a/qlearning-results/a0.1-g0.9-e0.9-qlearning b/qlearning-results/a0.1-g0.9-e0.9-qlearning new file mode 100644 index 0000000..cd8c4bd --- /dev/null +++ b/qlearning-results/a0.1-g0.9-e0.9-qlearning @@ -0,0 +1,30 @@ +2020-04-20 21:54:52,451 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 0.9, exploration_rate = 0.9 +223.64375000000032 +223.3694500000013 +223.38300000000066 +222.72075000000126 +222.8378500000019 +222.92165000000165 +222.5837000000008 +222.96700000000087 +223.00550000000015 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:02:36,064 INFO [tetris] Final score: 177 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:02:37,489 INFO [tetris] Final score: 310 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-20 22:02:40,305 INFO [tetris] Final score: 307 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 22:02:41,089 INFO [tetris] Final score: 157 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:02:42,129 INFO [tetris] Final score: 203 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:02:44,000 INFO [tetris] Final score: 308 +Lost due to: BlockOut(Position { x: 4, y: 20 }) 
+2020-04-20 22:02:46,160 INFO [tetris] Final score: 225 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 22:02:47,168 INFO [tetris] Final score: 193 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:02:49,024 INFO [tetris] Final score: 208 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:02:50,288 INFO [tetris] Final score: 291 diff --git a/qlearning-results/a0.1-g1.0-e0.1-approximateqlearning b/qlearning-results/a0.1-g1.0-e0.1-approximateqlearning new file mode 100644 index 0000000..58487a2 --- /dev/null +++ b/qlearning-results/a0.1-g1.0-e0.1-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 15:49:27,904 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 1, exploration_rate = 0.1 +346.7053500000016 +374.44729999999885 +371.84564999999986 +372.5076500000012 +372.05924999999826 +373.353400000003 +372.66549999999904 +372.52020000000164 +372.8893 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 20:04:21,601 INFO [tetris] Final score: 478 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:04:32,017 INFO [tetris] Final score: 247 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 20:04:52,545 INFO [tetris] Final score: 321 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:05:31,713 INFO [tetris] Final score: 470 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:05:54,848 INFO [tetris] Final score: 295 +Lost due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-20 20:06:23,569 INFO [tetris] Final score: 330 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 20:06:51,568 INFO [tetris] Final score: 353 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:07:48,178 INFO [tetris] Final score: 461 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 20:08:11,680 INFO [tetris] Final score: 373 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-20 20:08:39,793 INFO [tetris] Final score: 338 diff --git 
a/qlearning-results/a0.1-g1.0-e0.1-qlearning b/qlearning-results/a0.1-g1.0-e0.1-qlearning new file mode 100644 index 0000000..b18bb48 --- /dev/null +++ b/qlearning-results/a0.1-g1.0-e0.1-qlearning @@ -0,0 +1,30 @@ +2020-04-20 15:38:27,993 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 1, exploration_rate = 0.1 +218.93345000000153 +215.35020000000193 +214.6898000000031 +213.1617000000016 +213.27190000000175 +213.64700000000198 +212.96310000000136 +212.57775000000174 +212.41390000000064 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 15:49:01,102 INFO [tetris] Final score: 180 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 15:49:02,094 INFO [tetris] Final score: 206 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 15:49:03,965 INFO [tetris] Final score: 197 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 15:49:04,574 INFO [tetris] Final score: 234 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 15:49:05,421 INFO [tetris] Final score: 194 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 15:49:06,574 INFO [tetris] Final score: 220 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-20 15:49:07,886 INFO [tetris] Final score: 240 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 15:49:09,262 INFO [tetris] Final score: 236 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 15:49:12,030 INFO [tetris] Final score: 218 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 15:49:13,918 INFO [tetris] Final score: 265 diff --git a/qlearning-results/a0.1-g1.0-e0.25-approximateqlearning b/qlearning-results/a0.1-g1.0-e0.25-approximateqlearning new file mode 100644 index 0000000..4244708 --- /dev/null +++ b/qlearning-results/a0.1-g1.0-e0.25-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 19:36:42,460 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 1, exploration_rate = 0.25 
+200.03349999999998 +251.34600000000006 +291.4300000000002 +289.1145000000006 +294.6325000000002 +299.0275000000002 +297.8514999999994 +299.59449999999987 +300.00349999999935 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 19:42:08,739 INFO [tetris] Final score: 268 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:42:14,179 INFO [tetris] Final score: 314 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:42:20,643 INFO [tetris] Final score: 373 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 19:42:29,363 INFO [tetris] Final score: 277 +Lost due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-20 19:42:37,651 INFO [tetris] Final score: 282 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:42:44,195 INFO [tetris] Final score: 293 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:42:52,179 INFO [tetris] Final score: 289 +Lost due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-20 19:42:57,507 INFO [tetris] Final score: 325 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:43:06,675 INFO [tetris] Final score: 383 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:43:15,155 INFO [tetris] Final score: 341 diff --git a/qlearning-results/a0.1-g1.0-e0.25-qlearning b/qlearning-results/a0.1-g1.0-e0.25-qlearning new file mode 100644 index 0000000..1f8265c --- /dev/null +++ b/qlearning-results/a0.1-g1.0-e0.25-qlearning @@ -0,0 +1,30 @@ +2020-04-20 20:09:03,827 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 1, exploration_rate = 0.25 +222.20209999999997 +219.74505000000042 +218.25350000000049 +217.56200000000186 +217.54045000000082 +216.33019999999962 +216.22405000000035 +215.58710000000076 +215.40420000000307 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:19:23,140 INFO [tetris] Final score: 244 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:19:23,635 INFO [tetris] Final score: 201 +Lost due 
to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 20:19:24,835 INFO [tetris] Final score: 226 +Lost due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-20 20:19:26,115 INFO [tetris] Final score: 184 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-20 20:19:27,811 INFO [tetris] Final score: 300 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 20:19:28,291 INFO [tetris] Final score: 147 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:19:28,707 INFO [tetris] Final score: 189 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:19:29,379 INFO [tetris] Final score: 267 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:19:30,947 INFO [tetris] Final score: 211 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 20:19:31,955 INFO [tetris] Final score: 178 diff --git a/qlearning-results/a0.1-g1.0-e0.5-approximateqlearning b/qlearning-results/a0.1-g1.0-e0.5-approximateqlearning new file mode 100644 index 0000000..a9841e7 --- /dev/null +++ b/qlearning-results/a0.1-g1.0-e0.5-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 19:43:15,167 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 1, exploration_rate = 0.5 +199.35850000000056 +246.6900000000001 +252.32150000000004 +255.94250000000014 +256.25550000000027 +259.1924999999998 +257.8860000000002 +257.5220000000002 +256.3155 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:45:27,543 INFO [tetris] Final score: 243 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 19:45:31,111 INFO [tetris] Final score: 218 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:45:34,967 INFO [tetris] Final score: 247 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:45:37,351 INFO [tetris] Final score: 200 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:45:40,007 INFO [tetris] Final score: 257 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:45:45,655 
INFO [tetris] Final score: 359 +Lost due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-20 19:45:50,471 INFO [tetris] Final score: 303 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 19:45:52,918 INFO [tetris] Final score: 260 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:45:54,823 INFO [tetris] Final score: 223 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 19:45:57,287 INFO [tetris] Final score: 241 diff --git a/qlearning-results/a0.1-g1.0-e0.5-qlearning b/qlearning-results/a0.1-g1.0-e0.5-qlearning new file mode 100644 index 0000000..1111b5b --- /dev/null +++ b/qlearning-results/a0.1-g1.0-e0.5-qlearning @@ -0,0 +1,30 @@ +2020-04-20 20:19:47,925 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 1, exploration_rate = 0.5 +222.31680000000145 +220.51680000000223 +219.2328500000012 +218.89920000000134 +218.72694999999985 +218.28585000000157 +217.96815000000126 +218.5306000000002 +218.48100000000122 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:29:03,621 INFO [tetris] Final score: 166 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:29:04,629 INFO [tetris] Final score: 235 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:29:06,149 INFO [tetris] Final score: 220 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:29:06,949 INFO [tetris] Final score: 183 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:29:08,404 INFO [tetris] Final score: 249 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:29:09,109 INFO [tetris] Final score: 208 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:29:10,996 INFO [tetris] Final score: 258 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:29:12,357 INFO [tetris] Final score: 246 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-20 20:29:13,477 INFO [tetris] Final score: 262 +Lost due to: BlockOut(Position { x: 4, y: 20 }) 
+2020-04-20 20:29:14,277 INFO [tetris] Final score: 199 diff --git a/qlearning-results/a0.1-g1.0-e0.75-approximateqlearning b/qlearning-results/a0.1-g1.0-e0.75-approximateqlearning new file mode 100644 index 0000000..eabfed8 --- /dev/null +++ b/qlearning-results/a0.1-g1.0-e0.75-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 19:45:57,301 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 1, exploration_rate = 0.75 +213.57999999999984 +232.12850000000046 +236.8270000000001 +238.69949999999994 +236.26400000000027 +237.19350000000054 +237.26200000000057 +234.72600000000028 +237.0454999999998 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:47:15,705 INFO [tetris] Final score: 151 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:47:18,809 INFO [tetris] Final score: 291 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:47:19,481 INFO [tetris] Final score: 156 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 19:47:22,297 INFO [tetris] Final score: 238 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:47:23,321 INFO [tetris] Final score: 182 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:47:25,162 INFO [tetris] Final score: 260 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 19:47:27,097 INFO [tetris] Final score: 253 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 19:47:29,337 INFO [tetris] Final score: 254 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:47:30,857 INFO [tetris] Final score: 273 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 19:47:33,145 INFO [tetris] Final score: 207 diff --git a/qlearning-results/a0.1-g1.0-e0.75-qlearning b/qlearning-results/a0.1-g1.0-e0.75-qlearning new file mode 100644 index 0000000..390f6bf --- /dev/null +++ b/qlearning-results/a0.1-g1.0-e0.75-qlearning @@ -0,0 +1,30 @@ +2020-04-20 20:29:30,884 INFO [tetris::actors::qlearning] Training an actor with 
learning_rate = 0.1, discount_rate = 1, exploration_rate = 0.75 +223.4209 +222.43200000000067 +221.8176000000021 +221.0938500000012 +221.02510000000146 +220.63270000000313 +220.69475000000156 +220.55210000000224 +220.20850000000007 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:37:50,506 INFO [tetris] Final score: 199 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:37:52,203 INFO [tetris] Final score: 225 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:37:53,211 INFO [tetris] Final score: 284 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 20:37:53,931 INFO [tetris] Final score: 184 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:37:55,372 INFO [tetris] Final score: 225 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:37:56,251 INFO [tetris] Final score: 141 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:37:57,787 INFO [tetris] Final score: 249 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:37:59,659 INFO [tetris] Final score: 300 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:38:00,524 INFO [tetris] Final score: 202 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:38:01,675 INFO [tetris] Final score: 212 diff --git a/qlearning-results/a0.1-g1.0-e0.9-approximateqlearning b/qlearning-results/a0.1-g1.0-e0.9-approximateqlearning new file mode 100644 index 0000000..aa475ef --- /dev/null +++ b/qlearning-results/a0.1-g1.0-e0.9-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 19:47:33,158 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 1, exploration_rate = 0.9 +219.51950000000062 +229.94900000000015 +229.59200000000027 +228.4244999999997 +228.53999999999974 +226.99699999999993 +229.38000000000034 +226.4289999999998 +228.8485000000001 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:48:35,817 INFO [tetris] Final score: 216 +Lost due to: BlockOut(Position { 
x: 4, y: 20 }) +2020-04-20 19:48:37,449 INFO [tetris] Final score: 266 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:48:38,345 INFO [tetris] Final score: 204 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:48:39,161 INFO [tetris] Final score: 174 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:48:40,681 INFO [tetris] Final score: 202 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:48:42,713 INFO [tetris] Final score: 193 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-20 19:48:44,745 INFO [tetris] Final score: 229 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:48:47,337 INFO [tetris] Final score: 249 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:48:48,745 INFO [tetris] Final score: 254 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 19:48:51,544 INFO [tetris] Final score: 251 diff --git a/qlearning-results/a0.1-g1.0-e0.9-qlearning b/qlearning-results/a0.1-g1.0-e0.9-qlearning new file mode 100644 index 0000000..6bbff11 --- /dev/null +++ b/qlearning-results/a0.1-g1.0-e0.9-qlearning @@ -0,0 +1,30 @@ +2020-04-20 20:46:50,904 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.1, discount_rate = 1, exploration_rate = 0.9 +224.01290000000077 +223.2799000000014 +222.90805000000123 +222.36550000000076 +222.8054500000009 +222.31225000000111 +222.2265000000006 +222.38505000000137 +222.1896500000007 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:54:37,200 INFO [tetris] Final score: 192 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:54:38,317 INFO [tetris] Final score: 238 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 20:54:39,998 INFO [tetris] Final score: 233 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:54:41,789 INFO [tetris] Final score: 209 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:54:43,133 INFO [tetris] Final score: 204 +Lost due to: 
BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:54:44,350 INFO [tetris] Final score: 221 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 20:54:46,574 INFO [tetris] Final score: 263 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:54:47,421 INFO [tetris] Final score: 180 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:54:48,766 INFO [tetris] Final score: 253 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:54:49,437 INFO [tetris] Final score: 198 diff --git a/qlearning-results/a0.5-g0.1-e0.1-approximateqlearning b/qlearning-results/a0.5-g0.1-e0.1-approximateqlearning new file mode 100644 index 0000000..ae91c93 --- /dev/null +++ b/qlearning-results/a0.5-g0.1-e0.1-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 20:42:24,169 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 0.1, exploration_rate = 0.1 +203.37650000000005 +203.32950000000062 +205.2715000000001 +204.56650000000042 +205.21499999999992 +204.48100000000048 +203.35300000000078 +205.1395000000002 +204.1155 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:42:52,667 INFO [tetris] Final score: 194 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:42:52,891 INFO [tetris] Final score: 198 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:42:53,099 INFO [tetris] Final score: 198 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:42:53,307 INFO [tetris] Final score: 198 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:42:53,483 INFO [tetris] Final score: 196 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:42:53,691 INFO [tetris] Final score: 230 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 20:42:53,883 INFO [tetris] Final score: 234 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:42:54,091 INFO [tetris] Final score: 200 +Lost due to: LockOut +2020-04-20 20:42:54,284 INFO [tetris] Final score: 
208 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:42:54,475 INFO [tetris] Final score: 184 diff --git a/qlearning-results/a0.5-g0.1-e0.1-qlearning b/qlearning-results/a0.5-g0.1-e0.1-qlearning new file mode 100644 index 0000000..581b690 --- /dev/null +++ b/qlearning-results/a0.5-g0.1-e0.1-qlearning @@ -0,0 +1,30 @@ +2020-04-21 01:49:07,486 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 0.1, exploration_rate = 0.1 +220.52315000000206 +218.00770000000026 +216.9214000000011 +216.5741500000017 +216.04140000000086 +215.82124999999905 +215.89405000000198 +215.5497000000022 +215.38415000000222 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:56:47,105 INFO [tetris] Final score: 196 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-21 01:56:47,473 INFO [tetris] Final score: 179 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:56:48,817 INFO [tetris] Final score: 213 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 01:56:49,201 INFO [tetris] Final score: 194 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:56:50,145 INFO [tetris] Final score: 253 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:56:51,473 INFO [tetris] Final score: 242 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:56:52,033 INFO [tetris] Final score: 211 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:56:52,609 INFO [tetris] Final score: 196 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-21 01:56:53,489 INFO [tetris] Final score: 200 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:56:54,401 INFO [tetris] Final score: 216 diff --git a/qlearning-results/a0.5-g0.1-e0.25-approximateqlearning b/qlearning-results/a0.5-g0.1-e0.25-approximateqlearning new file mode 100644 index 0000000..99da7a6 --- /dev/null +++ b/qlearning-results/a0.5-g0.1-e0.25-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 20:42:54,489 INFO 
[tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 0.1, exploration_rate = 0.25 +196.89799999999997 +197.32500000000024 +196.77949999999998 +196.7270000000004 +198.16650000000024 +197.20350000000045 +198.81850000000034 +196.93449999999964 +195.7664999999999 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:43:20,168 INFO [tetris] Final score: 155 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-20 20:43:23,208 INFO [tetris] Final score: 237 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:43:24,664 INFO [tetris] Final score: 186 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:43:27,224 INFO [tetris] Final score: 238 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 20:43:28,968 INFO [tetris] Final score: 253 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 20:43:31,128 INFO [tetris] Final score: 270 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:43:32,840 INFO [tetris] Final score: 275 +Lost due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-20 20:43:33,481 INFO [tetris] Final score: 137 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:43:34,456 INFO [tetris] Final score: 194 +Lost due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-20 20:43:34,968 INFO [tetris] Final score: 140 diff --git a/qlearning-results/a0.5-g0.1-e0.25-qlearning b/qlearning-results/a0.5-g0.1-e0.25-qlearning new file mode 100644 index 0000000..e357fd7 --- /dev/null +++ b/qlearning-results/a0.5-g0.1-e0.25-qlearning @@ -0,0 +1,30 @@ +2020-04-21 01:57:05,932 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 0.1, exploration_rate = 0.25 +221.65875000000136 +219.62845000000095 +218.3312500000014 +217.77255000000184 +216.97090000000227 +217.32470000000177 +217.11195000000083 +216.90805000000185 +216.57765000000197 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:05:18,018 INFO [tetris] Final score: 
259 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:05:18,578 INFO [tetris] Final score: 204 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-21 02:05:19,009 INFO [tetris] Final score: 184 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:05:19,826 INFO [tetris] Final score: 241 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 02:05:21,202 INFO [tetris] Final score: 213 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-21 02:05:21,873 INFO [tetris] Final score: 178 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:05:23,490 INFO [tetris] Final score: 237 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:05:24,449 INFO [tetris] Final score: 230 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 02:05:24,930 INFO [tetris] Final score: 193 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:05:25,665 INFO [tetris] Final score: 185 diff --git a/qlearning-results/a0.5-g0.1-e0.5-approximateqlearning b/qlearning-results/a0.5-g0.1-e0.5-approximateqlearning new file mode 100644 index 0000000..8dcb9b0 --- /dev/null +++ b/qlearning-results/a0.5-g0.1-e0.5-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 20:43:34,981 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 0.1, exploration_rate = 0.5 +194.5400000000003 +193.8670000000001 +192.81949999999986 +193.60599999999968 +194.58000000000044 +193.0275 +193.3220000000003 +192.87450000000018 +193.6115 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:44:02,579 INFO [tetris] Final score: 254 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 20:44:02,819 INFO [tetris] Final score: 194 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:44:03,091 INFO [tetris] Final score: 143 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:44:03,795 INFO [tetris] Final score: 257 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 
20:44:04,659 INFO [tetris] Final score: 143 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:44:04,931 INFO [tetris] Final score: 201 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:44:05,475 INFO [tetris] Final score: 220 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:44:06,131 INFO [tetris] Final score: 171 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:44:06,579 INFO [tetris] Final score: 178 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:44:06,915 INFO [tetris] Final score: 188 diff --git a/qlearning-results/a0.5-g0.1-e0.5-qlearning b/qlearning-results/a0.5-g0.1-e0.5-qlearning new file mode 100644 index 0000000..778f1c9 --- /dev/null +++ b/qlearning-results/a0.5-g0.1-e0.5-qlearning @@ -0,0 +1,30 @@ +2020-04-21 02:05:39,081 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 0.1, exploration_rate = 0.5 +222.63780000000236 +220.23420000000027 +219.6870500000017 +219.27970000000136 +218.87424999999996 +217.92980000000009 +218.1931000000028 +218.29909999999973 +218.03945000000206 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:13:36,335 INFO [tetris] Final score: 206 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:13:37,935 INFO [tetris] Final score: 194 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:13:39,727 INFO [tetris] Final score: 238 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 02:13:41,039 INFO [tetris] Final score: 211 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:13:42,240 INFO [tetris] Final score: 234 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:13:43,471 INFO [tetris] Final score: 239 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:13:44,415 INFO [tetris] Final score: 219 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-21 02:13:45,008 INFO [tetris] Final score: 190 +Lost due to: BlockOut(Position { x: 4, 
y: 20 }) +2020-04-21 02:13:46,160 INFO [tetris] Final score: 217 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:13:46,607 INFO [tetris] Final score: 164 diff --git a/qlearning-results/a0.5-g0.1-e0.75-approximateqlearning b/qlearning-results/a0.5-g0.1-e0.75-approximateqlearning new file mode 100644 index 0000000..a8731a7 --- /dev/null +++ b/qlearning-results/a0.5-g0.1-e0.75-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 20:44:06,927 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 0.1, exploration_rate = 0.75 +197.86499999999978 +198.33849999999993 +199.11550000000037 +199.1649999999996 +198.89099999999974 +198.57650000000004 +200.0650000000003 +200.02300000000014 +198.99549999999994 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 20:44:40,315 INFO [tetris] Final score: 230 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:44:41,228 INFO [tetris] Final score: 199 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:44:42,140 INFO [tetris] Final score: 211 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:44:42,636 INFO [tetris] Final score: 212 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:44:43,788 INFO [tetris] Final score: 187 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:44:44,988 INFO [tetris] Final score: 206 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:44:45,707 INFO [tetris] Final score: 153 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:44:46,283 INFO [tetris] Final score: 197 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:44:46,971 INFO [tetris] Final score: 188 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:44:48,172 INFO [tetris] Final score: 184 diff --git a/qlearning-results/a0.5-g0.1-e0.75-qlearning b/qlearning-results/a0.5-g0.1-e0.75-qlearning new file mode 100644 index 0000000..e61d860 --- /dev/null +++ 
b/qlearning-results/a0.5-g0.1-e0.75-qlearning @@ -0,0 +1,30 @@ +2020-04-21 02:14:01,778 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 0.1, exploration_rate = 0.75 +223.03660000000102 +221.14205000000183 +220.64650000000265 +220.44305000000108 +220.33760000000007 +220.54309999999998 +219.56440000000123 +219.90739999999963 +219.63010000000193 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:21:23,926 INFO [tetris] Final score: 254 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:21:25,942 INFO [tetris] Final score: 251 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 02:21:26,726 INFO [tetris] Final score: 141 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:21:29,062 INFO [tetris] Final score: 258 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:21:30,822 INFO [tetris] Final score: 220 +Lost due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-21 02:21:32,134 INFO [tetris] Final score: 234 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:21:33,254 INFO [tetris] Final score: 175 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:21:34,407 INFO [tetris] Final score: 200 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:21:36,022 INFO [tetris] Final score: 239 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:21:36,950 INFO [tetris] Final score: 206 diff --git a/qlearning-results/a0.5-g0.1-e0.9-approximateqlearning b/qlearning-results/a0.5-g0.1-e0.9-approximateqlearning new file mode 100644 index 0000000..b05cc2d --- /dev/null +++ b/qlearning-results/a0.5-g0.1-e0.9-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 20:44:48,188 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 0.1, exploration_rate = 0.9 +210.13300000000012 +209.122 +208.12050000000042 +208.88250000000016 +208.1355 +209.6869999999997 +208.66399999999985 +208.08800000000028 
+208.9255000000001 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:45:29,675 INFO [tetris] Final score: 201 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:45:30,778 INFO [tetris] Final score: 212 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:45:31,803 INFO [tetris] Final score: 187 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 20:45:32,363 INFO [tetris] Final score: 189 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:45:33,227 INFO [tetris] Final score: 174 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 20:45:34,251 INFO [tetris] Final score: 166 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:45:35,162 INFO [tetris] Final score: 198 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:45:35,835 INFO [tetris] Final score: 194 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 20:45:36,682 INFO [tetris] Final score: 171 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 20:45:37,498 INFO [tetris] Final score: 226 diff --git a/qlearning-results/a0.5-g0.1-e0.9-qlearning b/qlearning-results/a0.5-g0.1-e0.9-qlearning new file mode 100644 index 0000000..e7649a2 --- /dev/null +++ b/qlearning-results/a0.5-g0.1-e0.9-qlearning @@ -0,0 +1,30 @@ +2020-04-21 02:21:53,490 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 0.1, exploration_rate = 0.9 +223.46845000000124 +223.1418000000001 +222.22019999999972 +222.05500000000015 +222.0302500000011 +222.12285000000077 +221.99990000000147 +221.55780000000158 +221.98394999999928 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 02:28:52,171 INFO [tetris] Final score: 176 +Lost due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-21 02:28:54,875 INFO [tetris] Final score: 319 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:28:56,251 INFO [tetris] Final score: 199 +Lost due to: LockOut +2020-04-21 02:28:57,515 INFO [tetris] Final 
score: 240 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:28:58,859 INFO [tetris] Final score: 178 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:29:00,155 INFO [tetris] Final score: 205 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 02:29:02,043 INFO [tetris] Final score: 339 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:29:03,131 INFO [tetris] Final score: 221 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:29:04,251 INFO [tetris] Final score: 198 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:29:06,363 INFO [tetris] Final score: 239 diff --git a/qlearning-results/a0.5-g0.5-e0.1-approximateqlearning b/qlearning-results/a0.5-g0.5-e0.1-approximateqlearning new file mode 100644 index 0000000..93fed9d --- /dev/null +++ b/qlearning-results/a0.5-g0.5-e0.1-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 20:39:29,556 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 0.5, exploration_rate = 0.1 +205.1515000000002 +203.5880000000005 +204.79000000000022 +205.6105000000004 +204.66900000000066 +204.7145 +203.85100000000045 +204.1160000000002 +205.15300000000053 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 20:39:54,816 INFO [tetris] Final score: 200 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:39:55,008 INFO [tetris] Final score: 216 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:39:55,217 INFO [tetris] Final score: 195 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:39:55,424 INFO [tetris] Final score: 200 +Lost due to: LockOut +2020-04-20 20:39:55,632 INFO [tetris] Final score: 200 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:39:55,825 INFO [tetris] Final score: 190 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 20:39:56,048 INFO [tetris] Final score: 198 +Lost due to: LockOut +2020-04-20 20:39:56,304 INFO [tetris] Final score: 212 
+Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:39:56,528 INFO [tetris] Final score: 215 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:39:56,736 INFO [tetris] Final score: 210 diff --git a/qlearning-results/a0.5-g0.5-e0.1-qlearning b/qlearning-results/a0.5-g0.5-e0.1-qlearning new file mode 100644 index 0000000..87c9462 --- /dev/null +++ b/qlearning-results/a0.5-g0.5-e0.1-qlearning @@ -0,0 +1,30 @@ +2020-04-21 01:08:55,755 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 0.5, exploration_rate = 0.1 +219.5724500000018 +217.2081000000039 +216.35635000000116 +215.82600000000267 +215.68915000000212 +215.25045000000264 +215.66660000000238 +215.1686000000017 +215.74980000000153 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:16:34,121 INFO [tetris] Final score: 223 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-21 01:16:34,841 INFO [tetris] Final score: 199 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-21 01:16:35,784 INFO [tetris] Final score: 192 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:16:37,081 INFO [tetris] Final score: 273 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 01:16:37,480 INFO [tetris] Final score: 193 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 01:16:38,264 INFO [tetris] Final score: 222 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 01:16:39,305 INFO [tetris] Final score: 246 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:16:39,993 INFO [tetris] Final score: 195 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:16:40,873 INFO [tetris] Final score: 243 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:16:41,721 INFO [tetris] Final score: 196 diff --git a/qlearning-results/a0.5-g0.5-e0.25-approximateqlearning b/qlearning-results/a0.5-g0.5-e0.25-approximateqlearning new file mode 100644 index 0000000..1fdcea6 --- /dev/null +++ 
b/qlearning-results/a0.5-g0.5-e0.25-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 20:39:56,747 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 0.5, exploration_rate = 0.25 +198.71550000000005 +197.91450000000012 +198.36000000000013 +198.0735 +197.2970000000005 +197.83399999999992 +198.572 +197.56950000000018 +198.58100000000022 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 20:40:21,258 INFO [tetris] Final score: 210 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:40:22,697 INFO [tetris] Final score: 231 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:40:23,017 INFO [tetris] Final score: 164 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 20:40:23,337 INFO [tetris] Final score: 181 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:40:24,169 INFO [tetris] Final score: 262 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:40:25,785 INFO [tetris] Final score: 304 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:40:26,986 INFO [tetris] Final score: 176 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:40:27,241 INFO [tetris] Final score: 176 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-20 20:40:28,425 INFO [tetris] Final score: 194 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 20:40:28,745 INFO [tetris] Final score: 164 diff --git a/qlearning-results/a0.5-g0.5-e0.25-qlearning b/qlearning-results/a0.5-g0.5-e0.25-qlearning new file mode 100644 index 0000000..0219635 --- /dev/null +++ b/qlearning-results/a0.5-g0.5-e0.25-qlearning @@ -0,0 +1,30 @@ +2020-04-21 01:16:53,056 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 0.5, exploration_rate = 0.25 +222.49660000000034 +219.73660000000217 +218.96715000000236 +218.05695000000065 +217.58195000000208 +217.3148500000012 +217.7000500000011 +217.22635000000182 +217.14505000000122 +Lost due to: 
BlockOut(Position { x: 5, y: 19 }) +2020-04-21 01:25:04,308 INFO [tetris] Final score: 199 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:25:04,948 INFO [tetris] Final score: 199 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:25:05,460 INFO [tetris] Final score: 192 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:25:06,084 INFO [tetris] Final score: 225 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 01:25:06,436 INFO [tetris] Final score: 208 +Lost due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-21 01:25:07,476 INFO [tetris] Final score: 234 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:25:08,212 INFO [tetris] Final score: 205 +Lost due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-21 01:25:09,748 INFO [tetris] Final score: 252 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 01:25:11,428 INFO [tetris] Final score: 198 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:25:12,372 INFO [tetris] Final score: 216 diff --git a/qlearning-results/a0.5-g0.5-e0.5-approximateqlearning b/qlearning-results/a0.5-g0.5-e0.5-approximateqlearning new file mode 100644 index 0000000..0cf8f6c --- /dev/null +++ b/qlearning-results/a0.5-g0.5-e0.5-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 20:40:28,756 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 0.5, exploration_rate = 0.5 +193.7969999999999 +192.72800000000007 +193.6359999999998 +193.49500000000052 +194.94100000000026 +193.34750000000048 +194.48699999999997 +194.4004999999999 +193.89799999999997 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 20:40:54,867 INFO [tetris] Final score: 169 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:40:55,315 INFO [tetris] Final score: 216 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:40:55,603 INFO [tetris] Final score: 181 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 
20:40:55,956 INFO [tetris] Final score: 170 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:40:56,275 INFO [tetris] Final score: 139 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:40:56,595 INFO [tetris] Final score: 173 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:40:56,947 INFO [tetris] Final score: 225 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:40:57,251 INFO [tetris] Final score: 204 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 20:40:57,603 INFO [tetris] Final score: 142 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:40:58,131 INFO [tetris] Final score: 218 diff --git a/qlearning-results/a0.5-g0.5-e0.5-qlearning b/qlearning-results/a0.5-g0.5-e0.5-qlearning new file mode 100644 index 0000000..6cffd36 --- /dev/null +++ b/qlearning-results/a0.5-g0.5-e0.5-qlearning @@ -0,0 +1,30 @@ +2020-04-21 01:25:25,702 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 0.5, exploration_rate = 0.5 +222.92050000000148 +220.33499999999992 +219.15265000000207 +219.5142500000014 +218.45280000000096 +218.5314500000014 +218.28495000000092 +217.89600000000118 +217.987750000001 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:33:21,155 INFO [tetris] Final score: 192 +Lost due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-21 01:33:22,356 INFO [tetris] Final score: 226 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:33:24,019 INFO [tetris] Final score: 217 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:33:24,964 INFO [tetris] Final score: 200 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:33:26,228 INFO [tetris] Final score: 242 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:33:27,459 INFO [tetris] Final score: 218 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:33:29,043 INFO [tetris] Final score: 214 +Lost due to: BlockOut(Position { x: 4, y: 
20 }) +2020-04-21 01:33:30,244 INFO [tetris] Final score: 252 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-21 01:33:31,028 INFO [tetris] Final score: 241 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:33:32,580 INFO [tetris] Final score: 248 diff --git a/qlearning-results/a0.5-g0.5-e0.75-approximateqlearning b/qlearning-results/a0.5-g0.5-e0.75-approximateqlearning new file mode 100644 index 0000000..4cf6ca8 --- /dev/null +++ b/qlearning-results/a0.5-g0.5-e0.75-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 20:40:58,145 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 0.5, exploration_rate = 0.75 +197.96099999999993 +198.97100000000023 +200.02950000000013 +199.54850000000042 +199.69600000000028 +197.88949999999997 +198.17749999999975 +198.86950000000041 +199.13550000000035 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:41:29,779 INFO [tetris] Final score: 159 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 20:41:30,291 INFO [tetris] Final score: 163 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:41:31,235 INFO [tetris] Final score: 189 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:41:31,827 INFO [tetris] Final score: 219 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-20 20:41:32,227 INFO [tetris] Final score: 166 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:41:33,251 INFO [tetris] Final score: 167 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 20:41:33,955 INFO [tetris] Final score: 193 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:41:34,435 INFO [tetris] Final score: 184 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:41:35,187 INFO [tetris] Final score: 219 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 20:41:35,907 INFO [tetris] Final score: 231 diff --git a/qlearning-results/a0.5-g0.5-e0.75-qlearning 
b/qlearning-results/a0.5-g0.5-e0.75-qlearning new file mode 100644 index 0000000..ff8cd48 --- /dev/null +++ b/qlearning-results/a0.5-g0.5-e0.75-qlearning @@ -0,0 +1,30 @@ +2020-04-21 01:33:47,681 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 0.5, exploration_rate = 0.75 +223.2484499999999 +221.97360000000006 +221.1147500000011 +220.56820000000087 +219.88284999999982 +220.1310000000007 +219.69925000000114 +219.76260000000016 +219.58485000000098 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:41:09,567 INFO [tetris] Final score: 189 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 01:41:11,343 INFO [tetris] Final score: 269 +Lost due to: LockOut +2020-04-21 01:41:13,087 INFO [tetris] Final score: 218 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:41:14,479 INFO [tetris] Final score: 286 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:41:15,999 INFO [tetris] Final score: 231 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:41:17,359 INFO [tetris] Final score: 195 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:41:18,143 INFO [tetris] Final score: 209 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:41:19,295 INFO [tetris] Final score: 244 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:41:20,879 INFO [tetris] Final score: 195 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:41:21,983 INFO [tetris] Final score: 247 diff --git a/qlearning-results/a0.5-g0.5-e0.9-approximateqlearning b/qlearning-results/a0.5-g0.5-e0.9-approximateqlearning new file mode 100644 index 0000000..e759f59 --- /dev/null +++ b/qlearning-results/a0.5-g0.5-e0.9-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 20:41:35,921 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 0.5, exploration_rate = 0.9 +208.1320000000003 +208.23949999999988 +208.45799999999983 +208.3780000000001 
+208.99550000000002 +209.09000000000043 +208.07650000000012 +208.97400000000025 +207.9755000000001 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 20:42:16,458 INFO [tetris] Final score: 169 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 20:42:17,098 INFO [tetris] Final score: 183 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:42:17,834 INFO [tetris] Final score: 167 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 20:42:18,506 INFO [tetris] Final score: 193 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 20:42:19,546 INFO [tetris] Final score: 167 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:42:20,378 INFO [tetris] Final score: 239 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:42:21,386 INFO [tetris] Final score: 232 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-20 20:42:22,090 INFO [tetris] Final score: 237 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:42:23,466 INFO [tetris] Final score: 246 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:42:24,154 INFO [tetris] Final score: 158 diff --git a/qlearning-results/a0.5-g0.5-e0.9-qlearning b/qlearning-results/a0.5-g0.5-e0.9-qlearning new file mode 100644 index 0000000..f61efee --- /dev/null +++ b/qlearning-results/a0.5-g0.5-e0.9-qlearning @@ -0,0 +1,30 @@ +2020-04-21 01:41:38,391 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 0.5, exploration_rate = 0.9 +223.55335000000053 +222.89440000000275 +222.76655000000048 +221.91440000000173 +221.9375500000019 +221.7492500000019 +221.86305000000246 +221.64800000000145 +221.42140000000026 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:48:38,825 INFO [tetris] Final score: 211 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:48:40,121 INFO [tetris] Final score: 269 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:48:40,857 INFO [tetris] 
Final score: 202 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:48:42,329 INFO [tetris] Final score: 219 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:48:43,945 INFO [tetris] Final score: 261 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 01:48:45,208 INFO [tetris] Final score: 193 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-21 01:48:46,121 INFO [tetris] Final score: 206 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:48:48,040 INFO [tetris] Final score: 292 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:48:49,400 INFO [tetris] Final score: 231 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:48:50,169 INFO [tetris] Final score: 204 diff --git a/qlearning-results/a0.5-g0.9-e0.1-approximateqlearning b/qlearning-results/a0.5-g0.9-e0.1-approximateqlearning new file mode 100644 index 0000000..783f323 --- /dev/null +++ b/qlearning-results/a0.5-g0.9-e0.1-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 20:36:28,129 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 0.9, exploration_rate = 0.1 +202.97050000000047 +202.64200000000017 +203.3925000000003 +202.7515000000006 +203.1550000000002 +203.68400000000057 +202.99150000000046 +229.16699999999997 +202.97650000000021 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:36:52,081 INFO [tetris] Final score: 194 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 20:36:52,306 INFO [tetris] Final score: 208 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:36:52,482 INFO [tetris] Final score: 198 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:36:52,658 INFO [tetris] Final score: 194 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:36:52,850 INFO [tetris] Final score: 196 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 20:36:53,170 INFO [tetris] Final score: 200 +Lost due to: BlockOut(Position { x: 
5, y: 20 }) +2020-04-20 20:36:53,362 INFO [tetris] Final score: 218 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 20:36:53,602 INFO [tetris] Final score: 185 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:36:53,811 INFO [tetris] Final score: 190 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:36:54,017 INFO [tetris] Final score: 201 diff --git a/qlearning-results/a0.5-g0.9-e0.1-qlearning b/qlearning-results/a0.5-g0.9-e0.1-qlearning new file mode 100644 index 0000000..57fd656 --- /dev/null +++ b/qlearning-results/a0.5-g0.9-e0.1-qlearning @@ -0,0 +1,30 @@ +2020-04-21 00:28:36,020 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 0.9, exploration_rate = 0.1 +214.4217500000025 +213.4693500000033 +212.35110000000185 +211.8555500000019 +211.06070000000088 +211.5214500000015 +210.9196000000009 +210.7748500000007 +210.66610000000165 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 00:36:21,382 INFO [tetris] Final score: 189 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-21 00:36:21,990 INFO [tetris] Final score: 185 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 00:36:23,286 INFO [tetris] Final score: 280 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 00:36:23,942 INFO [tetris] Final score: 175 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-21 00:36:24,581 INFO [tetris] Final score: 198 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-21 00:36:25,318 INFO [tetris] Final score: 182 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 00:36:26,149 INFO [tetris] Final score: 203 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 00:36:27,094 INFO [tetris] Final score: 225 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-21 00:36:28,038 INFO [tetris] Final score: 188 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-21 00:36:28,741 INFO [tetris] Final score: 209 diff --git 
a/qlearning-results/a0.5-g0.9-e0.25-approximateqlearning b/qlearning-results/a0.5-g0.9-e0.25-approximateqlearning new file mode 100644 index 0000000..441c7ac --- /dev/null +++ b/qlearning-results/a0.5-g0.9-e0.25-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 20:36:54,029 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 0.9, exploration_rate = 0.25 +200.2145000000004 +200.25650000000027 +201.15650000000036 +200.76500000000038 +200.30800000000002 +203.1889999999998 +205.19950000000046 +204.66200000000038 +202.46200000000036 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:37:22,123 INFO [tetris] Final score: 204 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:37:22,331 INFO [tetris] Final score: 206 +Lost due to: LockOut +2020-04-20 20:37:22,555 INFO [tetris] Final score: 188 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:37:22,747 INFO [tetris] Final score: 170 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:37:22,987 INFO [tetris] Final score: 202 +Lost due to: LockOut +2020-04-20 20:37:23,227 INFO [tetris] Final score: 191 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 20:37:23,451 INFO [tetris] Final score: 194 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 20:37:23,723 INFO [tetris] Final score: 220 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 20:37:23,931 INFO [tetris] Final score: 204 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 20:37:24,155 INFO [tetris] Final score: 200 diff --git a/qlearning-results/a0.5-g0.9-e0.25-qlearning b/qlearning-results/a0.5-g0.9-e0.25-qlearning new file mode 100644 index 0000000..8801cb5 --- /dev/null +++ b/qlearning-results/a0.5-g0.9-e0.25-qlearning @@ -0,0 +1,30 @@ +2020-04-21 00:36:40,028 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 0.9, exploration_rate = 0.25 +219.52965000000134 +217.46480000000184 
+217.05085000000153 +216.81235000000166 +216.0612000000017 +216.112400000001 +216.3895000000015 +216.86630000000244 +217.4367500000018 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 00:44:47,540 INFO [tetris] Final score: 210 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 00:44:48,276 INFO [tetris] Final score: 209 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 00:44:49,156 INFO [tetris] Final score: 168 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 00:44:49,861 INFO [tetris] Final score: 184 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 00:44:50,596 INFO [tetris] Final score: 196 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 00:44:51,476 INFO [tetris] Final score: 186 +Lost due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-21 00:44:51,956 INFO [tetris] Final score: 203 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-21 00:44:53,220 INFO [tetris] Final score: 194 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 00:44:54,437 INFO [tetris] Final score: 231 +Lost due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-21 00:44:55,668 INFO [tetris] Final score: 195 diff --git a/qlearning-results/a0.5-g0.9-e0.5-approximateqlearning b/qlearning-results/a0.5-g0.9-e0.5-approximateqlearning new file mode 100644 index 0000000..dc4f6de --- /dev/null +++ b/qlearning-results/a0.5-g0.9-e0.5-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 20:37:24,167 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 0.9, exploration_rate = 0.5 +198.03900000000033 +198.39150000000035 +196.89949999999996 +193.20600000000013 +196.12100000000032 +195.79950000000042 +196.5690000000002 +193.33049999999972 +196.24600000000004 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:37:57,626 INFO [tetris] Final score: 210 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:37:58,186 INFO [tetris] Final score: 177 +Lost due to: 
BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:37:59,370 INFO [tetris] Final score: 197 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:38:00,458 INFO [tetris] Final score: 188 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:38:00,794 INFO [tetris] Final score: 122 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:38:01,322 INFO [tetris] Final score: 188 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:38:01,754 INFO [tetris] Final score: 198 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:38:02,330 INFO [tetris] Final score: 204 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:38:03,018 INFO [tetris] Final score: 183 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:38:04,250 INFO [tetris] Final score: 240 diff --git a/qlearning-results/a0.5-g0.9-e0.5-qlearning b/qlearning-results/a0.5-g0.9-e0.5-qlearning new file mode 100644 index 0000000..bb7aec8 --- /dev/null +++ b/qlearning-results/a0.5-g0.9-e0.5-qlearning @@ -0,0 +1,30 @@ +2020-04-21 00:45:08,355 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 0.9, exploration_rate = 0.5 +223.13450000000327 +221.98790000000136 +220.86610000000152 +221.07425000000228 +220.40510000000216 +219.85380000000075 +219.9902500000028 +219.56560000000047 +219.67725000000144 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-21 00:52:59,125 INFO [tetris] Final score: 283 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 00:53:00,277 INFO [tetris] Final score: 222 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 00:53:02,116 INFO [tetris] Final score: 239 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 00:53:03,141 INFO [tetris] Final score: 227 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 00:53:04,069 INFO [tetris] Final score: 165 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 00:53:04,405 INFO [tetris] Final 
score: 186 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 00:53:05,381 INFO [tetris] Final score: 223 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 00:53:07,125 INFO [tetris] Final score: 206 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 00:53:08,661 INFO [tetris] Final score: 215 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-21 00:53:10,341 INFO [tetris] Final score: 254 diff --git a/qlearning-results/a0.5-g0.9-e0.75-approximateqlearning b/qlearning-results/a0.5-g0.9-e0.75-approximateqlearning new file mode 100644 index 0000000..758d8f3 --- /dev/null +++ b/qlearning-results/a0.5-g0.9-e0.75-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 20:38:04,265 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 0.9, exploration_rate = 0.75 +200.59000000000043 +199.5249999999997 +199.93000000000018 +198.82799999999986 +198.56499999999997 +198.56899999999956 +198.13950000000006 +199.04650000000038 +199.0665000000002 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:38:35,806 INFO [tetris] Final score: 190 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:38:36,110 INFO [tetris] Final score: 128 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 20:38:36,878 INFO [tetris] Final score: 199 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 20:38:37,518 INFO [tetris] Final score: 192 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:38:38,206 INFO [tetris] Final score: 206 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:38:38,910 INFO [tetris] Final score: 117 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 20:38:40,414 INFO [tetris] Final score: 201 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:38:42,110 INFO [tetris] Final score: 218 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:38:42,638 INFO [tetris] Final score: 160 +Lost due to: BlockOut(Position { x: 
4, y: 20 }) +2020-04-20 20:38:43,759 INFO [tetris] Final score: 134 diff --git a/qlearning-results/a0.5-g0.9-e0.75-qlearning b/qlearning-results/a0.5-g0.9-e0.75-qlearning new file mode 100644 index 0000000..61cc5eb --- /dev/null +++ b/qlearning-results/a0.5-g0.9-e0.75-qlearning @@ -0,0 +1,30 @@ +2020-04-21 00:53:24,589 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 0.9, exploration_rate = 0.75 +224.54995000000062 +222.71715000000097 +222.9293000000019 +222.2246000000026 +222.39265000000157 +222.02625000000216 +222.27435 +221.88075000000086 +221.68995000000257 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:00:53,262 INFO [tetris] Final score: 225 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 01:00:54,126 INFO [tetris] Final score: 205 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:00:54,894 INFO [tetris] Final score: 210 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:00:55,854 INFO [tetris] Final score: 168 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:00:57,438 INFO [tetris] Final score: 239 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 01:00:59,086 INFO [tetris] Final score: 181 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-21 01:01:00,094 INFO [tetris] Final score: 204 +Lost due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-21 01:01:01,614 INFO [tetris] Final score: 207 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:01:02,190 INFO [tetris] Final score: 195 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:01:03,518 INFO [tetris] Final score: 182 diff --git a/qlearning-results/a0.5-g0.9-e0.9-approximateqlearning b/qlearning-results/a0.5-g0.9-e0.9-approximateqlearning new file mode 100644 index 0000000..44aef7f --- /dev/null +++ b/qlearning-results/a0.5-g0.9-e0.9-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 20:38:43,772 INFO [tetris::actors::qlearning] Training an actor 
with learning_rate = 0.5, discount_rate = 0.9, exploration_rate = 0.9 +210.3485000000002 +207.0494999999995 +208.65500000000037 +208.2455 +208.08599999999996 +207.6900000000003 +208.2600000000004 +207.8269999999999 +208.09000000000012 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:39:21,191 INFO [tetris] Final score: 233 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 20:39:22,247 INFO [tetris] Final score: 275 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 20:39:23,255 INFO [tetris] Final score: 188 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 20:39:24,327 INFO [tetris] Final score: 251 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:39:24,983 INFO [tetris] Final score: 219 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:39:26,103 INFO [tetris] Final score: 214 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:39:26,599 INFO [tetris] Final score: 200 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 20:39:27,975 INFO [tetris] Final score: 252 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:39:28,615 INFO [tetris] Final score: 182 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:39:29,543 INFO [tetris] Final score: 201 diff --git a/qlearning-results/a0.5-g0.9-e0.9-qlearning b/qlearning-results/a0.5-g0.9-e0.9-qlearning new file mode 100644 index 0000000..8d8368d --- /dev/null +++ b/qlearning-results/a0.5-g0.9-e0.9-qlearning @@ -0,0 +1,30 @@ +2020-04-21 01:01:19,704 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 0.9, exploration_rate = 0.9 +224.0625500000003 +223.82470000000038 +223.50610000000273 +223.70925000000082 +223.13070000000013 +223.37880000000027 +223.02480000000185 +223.8222000000021 +222.90475000000066 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:08:27,931 INFO [tetris] Final score: 257 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 
01:08:29,132 INFO [tetris] Final score: 182 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:08:30,620 INFO [tetris] Final score: 241 +Lost due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-21 01:08:31,435 INFO [tetris] Final score: 189 +Lost due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-21 01:08:32,220 INFO [tetris] Final score: 231 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-21 01:08:32,940 INFO [tetris] Final score: 167 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 01:08:34,684 INFO [tetris] Final score: 265 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:08:35,660 INFO [tetris] Final score: 172 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-21 01:08:37,052 INFO [tetris] Final score: 211 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 01:08:38,620 INFO [tetris] Final score: 208 diff --git a/qlearning-results/a0.5-g1.0-e0.1-approximateqlearning b/qlearning-results/a0.5-g1.0-e0.1-approximateqlearning new file mode 100644 index 0000000..0b7a527 --- /dev/null +++ b/qlearning-results/a0.5-g1.0-e0.1-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 19:56:40,713 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 1, exploration_rate = 0.1 +314.89899999999966 +379.1089999999996 +374.69950000000034 +374.3954999999996 +372.0515000000002 +374.66099999999955 +372.2300000000005 +375.03550000000064 +373.0990000000006 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:18:46,365 INFO [tetris] Final score: 366 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 20:19:16,541 INFO [tetris] Final score: 423 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:19:59,453 INFO [tetris] Final score: 553 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 20:20:17,773 INFO [tetris] Final score: 309 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:20:43,677 INFO [tetris] Final score: 362 +Lost due to: 
BlockOut(Position { x: 5, y: 20 }) +2020-04-20 20:21:15,101 INFO [tetris] Final score: 353 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:21:44,413 INFO [tetris] Final score: 413 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:22:10,925 INFO [tetris] Final score: 449 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:22:29,901 INFO [tetris] Final score: 339 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 20:22:56,925 INFO [tetris] Final score: 372 diff --git a/qlearning-results/a0.5-g1.0-e0.1-qlearning b/qlearning-results/a0.5-g1.0-e0.1-qlearning new file mode 100644 index 0000000..c19f6c8 --- /dev/null +++ b/qlearning-results/a0.5-g1.0-e0.1-qlearning @@ -0,0 +1,30 @@ +2020-04-20 23:35:57,164 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 1, exploration_rate = 0.1 +217.07010000000116 +215.48750000000126 +213.89315000000127 +213.9104000000016 +213.30205000000078 +213.07895000000104 +213.82985000000173 +214.11780000000059 +214.23375000000158 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 23:49:41,844 INFO [tetris] Final score: 265 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 23:49:44,852 INFO [tetris] Final score: 210 +Lost due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-20 23:49:46,660 INFO [tetris] Final score: 249 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 23:49:47,764 INFO [tetris] Final score: 185 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 23:49:49,012 INFO [tetris] Final score: 216 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 23:49:49,892 INFO [tetris] Final score: 218 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 23:49:50,836 INFO [tetris] Final score: 156 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 23:49:52,692 INFO [tetris] Final score: 204 +Lost due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-20 23:49:53,764 INFO [tetris] Final score: 
178 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-20 23:49:55,908 INFO [tetris] Final score: 245 diff --git a/qlearning-results/a0.5-g1.0-e0.25-approximateqlearning b/qlearning-results/a0.5-g1.0-e0.25-approximateqlearning new file mode 100644 index 0000000..c0d39f4 --- /dev/null +++ b/qlearning-results/a0.5-g1.0-e0.25-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 20:22:56,937 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 1, exploration_rate = 0.25 +265.472 +301.7995000000003 +300.2394999999993 +297.31350000000015 +296.8614999999998 +298.42900000000003 +298.0585000000003 +298.2465 +297.685 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:29:27,245 INFO [tetris] Final score: 377 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:29:36,141 INFO [tetris] Final score: 256 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:29:47,436 INFO [tetris] Final score: 327 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:29:51,980 INFO [tetris] Final score: 241 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:30:01,820 INFO [tetris] Final score: 312 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:30:10,029 INFO [tetris] Final score: 376 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 20:30:19,100 INFO [tetris] Final score: 326 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 20:30:24,572 INFO [tetris] Final score: 258 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-20 20:30:31,052 INFO [tetris] Final score: 290 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:30:37,228 INFO [tetris] Final score: 285 diff --git a/qlearning-results/a0.5-g1.0-e0.25-qlearning b/qlearning-results/a0.5-g1.0-e0.25-qlearning new file mode 100644 index 0000000..9a480c1 --- /dev/null +++ b/qlearning-results/a0.5-g1.0-e0.25-qlearning @@ -0,0 +1,30 @@ +2020-04-20 23:50:10,585 INFO [tetris::actors::qlearning] 
Training an actor with learning_rate = 0.5, discount_rate = 1, exploration_rate = 0.25 +220.29590000000104 +218.86105000000103 +218.401800000002 +217.84359999999987 +217.01880000000187 +216.45335000000077 +216.29710000000202 +217.15850000000142 +216.41589999999925 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 00:01:39,136 INFO [tetris] Final score: 159 +Lost due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-21 00:01:40,416 INFO [tetris] Final score: 237 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 00:01:43,104 INFO [tetris] Final score: 355 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 00:01:44,608 INFO [tetris] Final score: 137 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 00:01:46,464 INFO [tetris] Final score: 169 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 00:01:48,399 INFO [tetris] Final score: 200 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 00:01:50,416 INFO [tetris] Final score: 228 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 00:01:52,288 INFO [tetris] Final score: 231 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 00:01:54,096 INFO [tetris] Final score: 202 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 00:01:55,104 INFO [tetris] Final score: 154 diff --git a/qlearning-results/a0.5-g1.0-e0.5-approximateqlearning b/qlearning-results/a0.5-g1.0-e0.5-approximateqlearning new file mode 100644 index 0000000..97543a6 --- /dev/null +++ b/qlearning-results/a0.5-g1.0-e0.5-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 20:30:37,235 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 1, exploration_rate = 0.5 +241.37399999999965 +257.31149999999957 +257.2300000000001 +257.35450000000003 +256.73199999999986 +255.0735 +256.7804999999999 +256.34699999999964 +256.6519999999999 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 20:33:06,085 INFO [tetris] Final score: 341 +Lost due 
to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 20:33:07,556 INFO [tetris] Final score: 253 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:33:08,980 INFO [tetris] Final score: 175 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:33:12,164 INFO [tetris] Final score: 243 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:33:13,732 INFO [tetris] Final score: 225 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:33:17,364 INFO [tetris] Final score: 236 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:33:20,340 INFO [tetris] Final score: 252 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:33:23,124 INFO [tetris] Final score: 173 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 20:33:25,748 INFO [tetris] Final score: 189 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:33:29,860 INFO [tetris] Final score: 303 diff --git a/qlearning-results/a0.5-g1.0-e0.5-qlearning b/qlearning-results/a0.5-g1.0-e0.5-qlearning new file mode 100644 index 0000000..9158afd --- /dev/null +++ b/qlearning-results/a0.5-g1.0-e0.5-qlearning @@ -0,0 +1,30 @@ +2020-04-21 00:02:10,694 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 1, exploration_rate = 0.5 +222.3642500000013 +219.94975000000082 +219.3147500000007 +219.1652000000013 +218.8023500000009 +218.44645000000153 +218.14265000000276 +219.10639999999998 +218.61155000000076 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 00:11:43,071 INFO [tetris] Final score: 229 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-21 00:11:43,903 INFO [tetris] Final score: 194 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 00:11:44,639 INFO [tetris] Final score: 172 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 00:11:46,655 INFO [tetris] Final score: 260 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 00:11:47,824 INFO [tetris] Final 
score: 184 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-21 00:11:48,559 INFO [tetris] Final score: 204 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-21 00:11:49,679 INFO [tetris] Final score: 218 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 00:11:50,399 INFO [tetris] Final score: 178 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 00:11:51,999 INFO [tetris] Final score: 251 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 00:11:53,728 INFO [tetris] Final score: 234 diff --git a/qlearning-results/a0.5-g1.0-e0.75-approximateqlearning b/qlearning-results/a0.5-g1.0-e0.75-approximateqlearning new file mode 100644 index 0000000..a09bdec --- /dev/null +++ b/qlearning-results/a0.5-g1.0-e0.75-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 20:33:29,865 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 1, exploration_rate = 0.75 +232.42399999999986 +237.16400000000016 +235.45699999999982 +237.99450000000027 +236.0200000000004 +235.28949999999986 +237.74450000000007 +236.42199999999983 +238.13749999999965 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-20 20:34:53,785 INFO [tetris] Final score: 225 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-20 20:34:56,057 INFO [tetris] Final score: 244 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:34:58,265 INFO [tetris] Final score: 184 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 20:35:00,824 INFO [tetris] Final score: 336 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:35:02,937 INFO [tetris] Final score: 222 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:35:05,465 INFO [tetris] Final score: 297 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:35:07,208 INFO [tetris] Final score: 207 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 20:35:08,297 INFO [tetris] Final score: 172 +Lost due to: LockOut +2020-04-20 
20:35:10,873 INFO [tetris] Final score: 193 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:35:12,489 INFO [tetris] Final score: 270 diff --git a/qlearning-results/a0.5-g1.0-e0.75-qlearning b/qlearning-results/a0.5-g1.0-e0.75-qlearning new file mode 100644 index 0000000..4f69ce0 --- /dev/null +++ b/qlearning-results/a0.5-g1.0-e0.75-qlearning @@ -0,0 +1,30 @@ +2020-04-21 00:12:10,114 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 1, exploration_rate = 0.75 +222.72695000000047 +221.92535000000115 +221.42015000000058 +221.31975000000068 +221.83170000000075 +221.0318000000008 +220.87970000000115 +221.33515000000043 +220.92910000000137 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 00:20:16,781 INFO [tetris] Final score: 292 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 00:20:18,157 INFO [tetris] Final score: 240 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-21 00:20:19,149 INFO [tetris] Final score: 182 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 00:20:20,957 INFO [tetris] Final score: 226 +Lost due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-21 00:20:22,126 INFO [tetris] Final score: 262 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 00:20:22,973 INFO [tetris] Final score: 179 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 00:20:24,318 INFO [tetris] Final score: 182 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 00:20:25,342 INFO [tetris] Final score: 173 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-21 00:20:27,101 INFO [tetris] Final score: 280 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 00:20:27,998 INFO [tetris] Final score: 190 diff --git a/qlearning-results/a0.5-g1.0-e0.9-approximateqlearning b/qlearning-results/a0.5-g1.0-e0.9-approximateqlearning new file mode 100644 index 0000000..58b6e4f --- /dev/null +++ b/qlearning-results/a0.5-g1.0-e0.9-approximateqlearning @@ -0,0 
+1,30 @@ +2020-04-20 20:35:12,493 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 1, exploration_rate = 0.9 +227.1795000000003 +227.45000000000027 +227.78050000000044 +227.90849999999978 +227.35900000000012 +226.63799999999975 +226.39900000000037 +228.26449999999986 +226.9465000000005 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:36:15,916 INFO [tetris] Final score: 163 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:36:16,845 INFO [tetris] Final score: 237 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:36:18,284 INFO [tetris] Final score: 183 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 20:36:19,820 INFO [tetris] Final score: 220 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 20:36:20,908 INFO [tetris] Final score: 180 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 20:36:22,652 INFO [tetris] Final score: 252 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:36:24,333 INFO [tetris] Final score: 200 +Lost due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-20 20:36:25,084 INFO [tetris] Final score: 191 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 20:36:26,684 INFO [tetris] Final score: 219 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 20:36:28,124 INFO [tetris] Final score: 237 diff --git a/qlearning-results/a0.5-g1.0-e0.9-qlearning b/qlearning-results/a0.5-g1.0-e0.9-qlearning new file mode 100644 index 0000000..89d6c39 --- /dev/null +++ b/qlearning-results/a0.5-g1.0-e0.9-qlearning @@ -0,0 +1,30 @@ +2020-04-21 00:20:45,202 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.5, discount_rate = 1, exploration_rate = 0.9 +223.9953500000022 +222.9694500000008 +223.12235000000067 +222.78095000000044 +223.28720000000112 +223.08990000000207 +222.52310000000165 +222.5425500000013 +222.57469999999955 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 
00:28:05,607 INFO [tetris] Final score: 208 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 00:28:06,796 INFO [tetris] Final score: 184 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 00:28:08,428 INFO [tetris] Final score: 198 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 00:28:10,011 INFO [tetris] Final score: 248 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 00:28:11,772 INFO [tetris] Final score: 218 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 00:28:13,068 INFO [tetris] Final score: 207 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 00:28:14,108 INFO [tetris] Final score: 176 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 00:28:15,691 INFO [tetris] Final score: 218 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 00:28:17,244 INFO [tetris] Final score: 205 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 00:28:18,412 INFO [tetris] Final score: 205 diff --git a/qlearning-results/a0.7-g0.1-e0.1-approximateqlearning b/qlearning-results/a0.7-g0.1-e0.1-approximateqlearning new file mode 100644 index 0000000..4d43600 --- /dev/null +++ b/qlearning-results/a0.7-g0.1-e0.1-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 21:31:56,907 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 0.1, exploration_rate = 0.1 +205.06799999999987 +203.4910000000006 +203.63700000000023 +204.9025000000004 +205.20100000000005 +203.47650000000007 +203.90649999999988 +204.76350000000016 +203.32050000000007 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-20 21:32:27,963 INFO [tetris] Final score: 153 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 21:32:28,316 INFO [tetris] Final score: 195 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 21:32:28,556 INFO [tetris] Final score: 174 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 21:32:28,844 INFO [tetris] Final score: 164 +Lost 
due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 21:32:29,100 INFO [tetris] Final score: 171 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:32:29,500 INFO [tetris] Final score: 242 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:32:29,851 INFO [tetris] Final score: 214 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:32:30,043 INFO [tetris] Final score: 176 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:32:30,267 INFO [tetris] Final score: 153 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:32:30,508 INFO [tetris] Final score: 195 diff --git a/qlearning-results/a0.7-g0.1-e0.1-qlearning b/qlearning-results/a0.7-g0.1-e0.1-qlearning new file mode 100644 index 0000000..ab3d4d4 --- /dev/null +++ b/qlearning-results/a0.7-g0.1-e0.1-qlearning @@ -0,0 +1,30 @@ +2020-04-21 04:45:27,198 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 0.1, exploration_rate = 0.1 +220.9134500000001 +218.46825000000183 +216.96985000000112 +216.49105000000256 +216.20370000000025 +215.66090000000148 +216.01450000000276 +215.32430000000122 +215.499950000001 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 04:53:07,450 INFO [tetris] Final score: 237 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 04:53:08,489 INFO [tetris] Final score: 255 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-21 04:53:09,145 INFO [tetris] Final score: 188 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 04:53:10,713 INFO [tetris] Final score: 265 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 04:53:11,194 INFO [tetris] Final score: 206 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 04:53:12,122 INFO [tetris] Final score: 211 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 04:53:12,489 INFO [tetris] Final score: 215 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 04:53:13,386 INFO [tetris] 
Final score: 202 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 04:53:14,090 INFO [tetris] Final score: 196 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-21 04:53:14,650 INFO [tetris] Final score: 200 diff --git a/qlearning-results/a0.7-g0.1-e0.25-approximateqlearning b/qlearning-results/a0.7-g0.1-e0.25-approximateqlearning new file mode 100644 index 0000000..6c913bf --- /dev/null +++ b/qlearning-results/a0.7-g0.1-e0.25-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 21:32:30,522 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 0.1, exploration_rate = 0.25 +197.7265 +196.07349999999977 +195.49700000000016 +197.3180000000003 +196.854 +197.9659999999997 +196.6815000000002 +197.66900000000015 +196.31300000000013 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 21:32:56,999 INFO [tetris] Final score: 198 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 21:32:57,175 INFO [tetris] Final score: 168 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:32:57,543 INFO [tetris] Final score: 202 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:32:57,783 INFO [tetris] Final score: 184 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:32:58,007 INFO [tetris] Final score: 202 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:32:58,375 INFO [tetris] Final score: 218 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:32:58,759 INFO [tetris] Final score: 172 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:32:59,191 INFO [tetris] Final score: 209 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:32:59,495 INFO [tetris] Final score: 139 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:32:59,735 INFO [tetris] Final score: 202 diff --git a/qlearning-results/a0.7-g0.1-e0.25-qlearning b/qlearning-results/a0.7-g0.1-e0.25-qlearning new file mode 100644 index 0000000..87ba602 --- 
/dev/null +++ b/qlearning-results/a0.7-g0.1-e0.25-qlearning @@ -0,0 +1,30 @@ +2020-04-21 04:53:26,134 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 0.1, exploration_rate = 0.25 +221.59775000000022 +219.6475500000023 +218.40525000000093 +217.8659000000026 +217.86260000000172 +217.56835000000152 +216.838600000002 +216.65040000000275 +216.9875500000018 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 05:01:42,328 INFO [tetris] Final score: 235 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 05:01:44,536 INFO [tetris] Final score: 279 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-21 05:01:45,352 INFO [tetris] Final score: 207 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 05:01:46,456 INFO [tetris] Final score: 236 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-21 05:01:47,192 INFO [tetris] Final score: 185 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 05:01:47,928 INFO [tetris] Final score: 223 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-21 05:01:49,208 INFO [tetris] Final score: 208 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 05:01:50,120 INFO [tetris] Final score: 209 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 05:01:51,320 INFO [tetris] Final score: 199 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-21 05:01:53,144 INFO [tetris] Final score: 265 diff --git a/qlearning-results/a0.7-g0.1-e0.5-approximateqlearning b/qlearning-results/a0.7-g0.1-e0.5-approximateqlearning new file mode 100644 index 0000000..47ea943 --- /dev/null +++ b/qlearning-results/a0.7-g0.1-e0.5-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 21:32:59,746 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 0.1, exploration_rate = 0.5 +193.10700000000023 +192.20799999999977 +192.60200000000037 +193.61800000000002 +193.59550000000024 +193.2725000000001 +194.71150000000011 
+193.1584999999996 +193.34999999999997 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 21:33:27,856 INFO [tetris] Final score: 226 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:33:28,224 INFO [tetris] Final score: 181 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:33:28,785 INFO [tetris] Final score: 218 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 21:33:29,744 INFO [tetris] Final score: 243 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:33:30,081 INFO [tetris] Final score: 179 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 21:33:30,816 INFO [tetris] Final score: 151 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:33:31,232 INFO [tetris] Final score: 157 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 21:33:31,584 INFO [tetris] Final score: 198 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:33:32,000 INFO [tetris] Final score: 187 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:33:32,992 INFO [tetris] Final score: 214 diff --git a/qlearning-results/a0.7-g0.1-e0.5-qlearning b/qlearning-results/a0.7-g0.1-e0.5-qlearning new file mode 100644 index 0000000..32bced8 --- /dev/null +++ b/qlearning-results/a0.7-g0.1-e0.5-qlearning @@ -0,0 +1,30 @@ +2020-04-21 05:02:06,528 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 0.1, exploration_rate = 0.5 +222.32865000000132 +220.41344999999964 +219.4295000000009 +219.03665000000092 +218.2892000000019 +218.8974500000003 +218.19365000000175 +218.02370000000064 +217.78890000000004 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 05:10:03,098 INFO [tetris] Final score: 189 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 05:10:04,058 INFO [tetris] Final score: 216 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 05:10:05,226 INFO [tetris] Final score: 226 +Lost due to: BlockOut(Position { x: 4, y: 20 
}) +2020-04-21 05:10:07,002 INFO [tetris] Final score: 219 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 05:10:08,025 INFO [tetris] Final score: 259 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 05:10:09,129 INFO [tetris] Final score: 161 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 05:10:10,170 INFO [tetris] Final score: 237 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 05:10:11,321 INFO [tetris] Final score: 206 +Lost due to: LockOut +2020-04-21 05:10:12,090 INFO [tetris] Final score: 214 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 05:10:12,905 INFO [tetris] Final score: 173 diff --git a/qlearning-results/a0.7-g0.1-e0.75-approximateqlearning b/qlearning-results/a0.7-g0.1-e0.75-approximateqlearning new file mode 100644 index 0000000..5dbabd1 --- /dev/null +++ b/qlearning-results/a0.7-g0.1-e0.75-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 21:33:33,005 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 0.1, exploration_rate = 0.75 +198.2490000000002 +199.33100000000022 +198.01250000000007 +199.5534999999999 +199.09949999999992 +199.26700000000008 +199.94449999999983 +199.19699999999986 +198.18700000000027 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:34:06,201 INFO [tetris] Final score: 129 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:34:07,081 INFO [tetris] Final score: 175 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 21:34:07,800 INFO [tetris] Final score: 222 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 21:34:08,232 INFO [tetris] Final score: 192 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:34:08,617 INFO [tetris] Final score: 168 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:34:09,129 INFO [tetris] Final score: 186 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:34:09,641 INFO [tetris] Final score: 197 +Lost due to: 
BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:34:10,217 INFO [tetris] Final score: 190 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:34:10,985 INFO [tetris] Final score: 222 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:34:11,305 INFO [tetris] Final score: 189 diff --git a/qlearning-results/a0.7-g0.1-e0.75-qlearning b/qlearning-results/a0.7-g0.1-e0.75-qlearning new file mode 100644 index 0000000..5b1ead6 --- /dev/null +++ b/qlearning-results/a0.7-g0.1-e0.75-qlearning @@ -0,0 +1,30 @@ +2020-04-21 05:10:28,038 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 0.1, exploration_rate = 0.75 +222.84460000000047 +221.21740000000136 +220.92145000000144 +220.32520000000156 +220.87345000000195 +219.41920000000152 +220.1004000000004 +219.86515000000256 +219.05949999999996 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-21 05:17:50,430 INFO [tetris] Final score: 233 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-21 05:17:51,726 INFO [tetris] Final score: 217 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 05:17:52,382 INFO [tetris] Final score: 176 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 05:17:54,894 INFO [tetris] Final score: 271 +Lost due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-21 05:17:57,565 INFO [tetris] Final score: 290 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 05:17:59,342 INFO [tetris] Final score: 243 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 05:18:00,670 INFO [tetris] Final score: 240 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 05:18:01,470 INFO [tetris] Final score: 206 +Lost due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-21 05:18:02,766 INFO [tetris] Final score: 171 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 05:18:03,534 INFO [tetris] Final score: 191 diff --git a/qlearning-results/a0.7-g0.1-e0.9-approximateqlearning 
b/qlearning-results/a0.7-g0.1-e0.9-approximateqlearning new file mode 100644 index 0000000..dab050a --- /dev/null +++ b/qlearning-results/a0.7-g0.1-e0.9-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 21:34:11,321 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 0.1, exploration_rate = 0.9 +208.7134999999998 +209.66350000000003 +208.55499999999992 +208.98049999999984 +209.2109999999997 +210.35750000000024 +208.9709999999997 +210.14850000000013 +209.65449999999998 +Lost due to: LockOut +2020-04-20 21:34:53,086 INFO [tetris] Final score: 211 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:34:54,175 INFO [tetris] Final score: 200 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:34:55,295 INFO [tetris] Final score: 224 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:34:56,110 INFO [tetris] Final score: 173 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 21:34:57,598 INFO [tetris] Final score: 289 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 21:34:58,879 INFO [tetris] Final score: 253 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 21:34:59,310 INFO [tetris] Final score: 182 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:35:00,270 INFO [tetris] Final score: 202 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 21:35:01,215 INFO [tetris] Final score: 247 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 21:35:02,959 INFO [tetris] Final score: 280 diff --git a/qlearning-results/a0.7-g0.1-e0.9-qlearning b/qlearning-results/a0.7-g0.1-e0.9-qlearning new file mode 100644 index 0000000..7e1a65c --- /dev/null +++ b/qlearning-results/a0.7-g0.1-e0.9-qlearning @@ -0,0 +1,30 @@ +2020-04-21 05:18:20,073 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 0.1, exploration_rate = 0.9 +223.62495000000055 +222.95280000000224 +222.44615000000033 +222.4190999999997 
+222.1059500000012 +222.01755000000145 +221.85240000000096 +221.5929000000017 +221.48355000000163 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 05:25:20,161 INFO [tetris] Final score: 265 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-21 05:25:21,312 INFO [tetris] Final score: 242 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 05:25:22,865 INFO [tetris] Final score: 250 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-21 05:25:24,609 INFO [tetris] Final score: 240 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 05:25:26,401 INFO [tetris] Final score: 214 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 05:25:27,985 INFO [tetris] Final score: 229 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 05:25:28,769 INFO [tetris] Final score: 198 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 05:25:29,617 INFO [tetris] Final score: 221 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-21 05:25:31,105 INFO [tetris] Final score: 213 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 05:25:31,969 INFO [tetris] Final score: 240 diff --git a/qlearning-results/a0.7-g0.5-e0.1-approximateqlearning b/qlearning-results/a0.7-g0.5-e0.1-approximateqlearning new file mode 100644 index 0000000..d4ef582 --- /dev/null +++ b/qlearning-results/a0.7-g0.5-e0.1-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 21:29:01,106 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 0.5, exploration_rate = 0.1 +203.66950000000043 +203.75650000000047 +203.91750000000053 +204.8320000000005 +204.38150000000022 +204.16349999999997 +204.26450000000017 +205.90300000000045 +204.63100000000037 +Lost due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-20 21:29:28,137 INFO [tetris] Final score: 196 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-20 21:29:28,377 INFO [tetris] Final score: 196 +Lost due to: BlockOut(Position { x: 3, y: 20 }) 
+2020-04-20 21:29:28,601 INFO [tetris] Final score: 206 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:29:28,777 INFO [tetris] Final score: 202 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:29:29,017 INFO [tetris] Final score: 200 +Lost due to: LockOut +2020-04-20 21:29:29,225 INFO [tetris] Final score: 204 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:29:29,465 INFO [tetris] Final score: 198 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:29:29,641 INFO [tetris] Final score: 172 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:29:29,848 INFO [tetris] Final score: 192 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:29:30,041 INFO [tetris] Final score: 194 diff --git a/qlearning-results/a0.7-g0.5-e0.1-qlearning b/qlearning-results/a0.7-g0.5-e0.1-qlearning new file mode 100644 index 0000000..880869f --- /dev/null +++ b/qlearning-results/a0.7-g0.5-e0.1-qlearning @@ -0,0 +1,30 @@ +2020-04-21 04:05:16,119 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 0.5, exploration_rate = 0.1 +221.2680000000025 +218.37540000000104 +217.10060000000163 +216.2829500000021 +215.81070000000227 +215.82075000000367 +215.77970000000266 +215.58015000000194 +215.40985000000137 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 04:12:53,680 INFO [tetris] Final score: 209 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-21 04:12:54,528 INFO [tetris] Final score: 201 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 04:12:55,760 INFO [tetris] Final score: 228 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-21 04:12:56,448 INFO [tetris] Final score: 200 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 04:12:56,896 INFO [tetris] Final score: 218 +Lost due to: BlockOut(Position { x: 6, y: 19 }) +2020-04-21 04:12:57,792 INFO [tetris] Final score: 210 +Lost due to: BlockOut(Position { x: 4, y: 19 }) 
+2020-04-21 04:12:58,640 INFO [tetris] Final score: 215 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 04:12:59,120 INFO [tetris] Final score: 199 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 04:12:59,856 INFO [tetris] Final score: 198 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-21 04:13:00,336 INFO [tetris] Final score: 227 diff --git a/qlearning-results/a0.7-g0.5-e0.25-approximateqlearning b/qlearning-results/a0.7-g0.5-e0.25-approximateqlearning new file mode 100644 index 0000000..076e60c --- /dev/null +++ b/qlearning-results/a0.7-g0.5-e0.25-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 21:29:30,056 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 0.5, exploration_rate = 0.25 +198.48499999999999 +197.94750000000002 +197.9815000000003 +197.8610000000002 +198.40700000000004 +198.67700000000053 +198.43300000000016 +197.6905 +197.90250000000006 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:29:54,727 INFO [tetris] Final score: 178 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:29:55,382 INFO [tetris] Final score: 219 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 21:29:55,607 INFO [tetris] Final score: 200 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:29:55,830 INFO [tetris] Final score: 182 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:29:56,006 INFO [tetris] Final score: 196 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:29:56,231 INFO [tetris] Final score: 200 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:29:56,486 INFO [tetris] Final score: 216 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:29:56,806 INFO [tetris] Final score: 231 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:29:56,998 INFO [tetris] Final score: 184 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:29:57,222 INFO [tetris] Final score: 181 
diff --git a/qlearning-results/a0.7-g0.5-e0.25-qlearning b/qlearning-results/a0.7-g0.5-e0.25-qlearning new file mode 100644 index 0000000..10a7508 --- /dev/null +++ b/qlearning-results/a0.7-g0.5-e0.25-qlearning @@ -0,0 +1,30 @@ +2020-04-21 04:13:11,811 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 0.5, exploration_rate = 0.25 +221.77135000000226 +219.93090000000265 +218.68195000000202 +218.32155000000182 +217.83570000000154 +217.31070000000253 +217.6305000000007 +217.02715000000006 +216.85685000000282 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-21 04:21:24,555 INFO [tetris] Final score: 198 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 04:21:25,467 INFO [tetris] Final score: 214 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 04:21:25,852 INFO [tetris] Final score: 195 +Lost due to: LockOut +2020-04-21 04:21:26,667 INFO [tetris] Final score: 202 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 04:21:28,284 INFO [tetris] Final score: 247 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-21 04:21:30,300 INFO [tetris] Final score: 237 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 04:21:30,908 INFO [tetris] Final score: 207 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 04:21:31,947 INFO [tetris] Final score: 211 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 04:21:33,420 INFO [tetris] Final score: 211 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 04:21:34,139 INFO [tetris] Final score: 212 diff --git a/qlearning-results/a0.7-g0.5-e0.5-approximateqlearning b/qlearning-results/a0.7-g0.5-e0.5-approximateqlearning new file mode 100644 index 0000000..4c499c7 --- /dev/null +++ b/qlearning-results/a0.7-g0.5-e0.5-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 21:29:57,236 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 0.5, exploration_rate = 0.5 
+194.04349999999994 +193.6319999999999 +194.0960000000002 +193.00299999999976 +193.12700000000044 +193.7154999999996 +192.96799999999976 +193.61299999999963 +193.4725000000004 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:30:23,417 INFO [tetris] Final score: 215 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-20 21:30:23,865 INFO [tetris] Final score: 137 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:30:24,121 INFO [tetris] Final score: 152 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:30:24,857 INFO [tetris] Final score: 265 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:30:25,145 INFO [tetris] Final score: 169 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:30:25,577 INFO [tetris] Final score: 200 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 21:30:25,817 INFO [tetris] Final score: 169 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:30:26,377 INFO [tetris] Final score: 252 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:30:27,065 INFO [tetris] Final score: 185 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:30:27,417 INFO [tetris] Final score: 190 diff --git a/qlearning-results/a0.7-g0.5-e0.5-qlearning b/qlearning-results/a0.7-g0.5-e0.5-qlearning new file mode 100644 index 0000000..9f993a8 --- /dev/null +++ b/qlearning-results/a0.7-g0.5-e0.5-qlearning @@ -0,0 +1,30 @@ +2020-04-21 04:21:47,620 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 0.5, exploration_rate = 0.5 +221.78365000000016 +220.36190000000127 +219.6699999999998 +218.93470000000156 +218.8314000000004 +218.64460000000062 +218.135150000001 +218.07535000000257 +217.76015000000226 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 04:29:43,560 INFO [tetris] Final score: 221 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 04:29:45,224 INFO [tetris] Final score: 245 +Lost due to: 
BlockOut(Position { x: 3, y: 20 }) +2020-04-21 04:29:46,376 INFO [tetris] Final score: 215 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 04:29:47,495 INFO [tetris] Final score: 201 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 04:29:48,376 INFO [tetris] Final score: 209 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 04:29:49,847 INFO [tetris] Final score: 231 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 04:29:51,688 INFO [tetris] Final score: 225 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 04:29:52,664 INFO [tetris] Final score: 185 +Lost due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-21 04:29:53,704 INFO [tetris] Final score: 178 +Lost due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-21 04:29:54,839 INFO [tetris] Final score: 193 diff --git a/qlearning-results/a0.7-g0.5-e0.75-approximateqlearning b/qlearning-results/a0.7-g0.5-e0.75-approximateqlearning new file mode 100644 index 0000000..bf81d81 --- /dev/null +++ b/qlearning-results/a0.7-g0.5-e0.75-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 21:30:27,427 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 0.5, exploration_rate = 0.75 +197.35000000000025 +197.59800000000004 +199.0745000000005 +198.75000000000014 +198.53600000000026 +198.09599999999978 +199.12850000000057 +200.65399999999983 +199.04100000000022 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:30:59,969 INFO [tetris] Final score: 218 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:31:00,609 INFO [tetris] Final score: 189 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:31:01,281 INFO [tetris] Final score: 205 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 21:31:02,033 INFO [tetris] Final score: 172 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 21:31:02,689 INFO [tetris] Final score: 192 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 
21:31:03,569 INFO [tetris] Final score: 179 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 21:31:04,001 INFO [tetris] Final score: 192 +Lost due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-20 21:31:04,289 INFO [tetris] Final score: 136 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-20 21:31:04,657 INFO [tetris] Final score: 179 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:31:05,057 INFO [tetris] Final score: 208 diff --git a/qlearning-results/a0.7-g0.5-e0.75-qlearning b/qlearning-results/a0.7-g0.5-e0.75-qlearning new file mode 100644 index 0000000..d0e90c1 --- /dev/null +++ b/qlearning-results/a0.7-g0.5-e0.75-qlearning @@ -0,0 +1,30 @@ +2020-04-21 04:30:09,869 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 0.5, exploration_rate = 0.75 +222.62120000000016 +221.6745000000016 +220.5436500000018 +220.48410000000118 +220.41135000000045 +220.0337500000004 +220.05105000000037 +219.8228000000005 +219.5190500000022 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 04:37:31,870 INFO [tetris] Final score: 238 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 04:37:33,166 INFO [tetris] Final score: 274 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 04:37:35,022 INFO [tetris] Final score: 234 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 04:37:35,998 INFO [tetris] Final score: 179 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 04:37:37,534 INFO [tetris] Final score: 212 +Lost due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-21 04:37:38,829 INFO [tetris] Final score: 243 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 04:37:39,645 INFO [tetris] Final score: 154 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 04:37:40,205 INFO [tetris] Final score: 188 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 04:37:40,990 INFO [tetris] Final score: 160 +Lost due to: BlockOut(Position { x: 3, 
y: 20 }) +2020-04-21 04:37:42,030 INFO [tetris] Final score: 182 diff --git a/qlearning-results/a0.7-g0.5-e0.9-approximateqlearning b/qlearning-results/a0.7-g0.5-e0.9-approximateqlearning new file mode 100644 index 0000000..f26ea4d --- /dev/null +++ b/qlearning-results/a0.7-g0.5-e0.9-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 21:31:05,073 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 0.5, exploration_rate = 0.9 +209.0025000000003 +208.94150000000002 +207.52350000000015 +208.3560000000002 +208.35900000000015 +209.61550000000045 +208.1925000000006 +207.99900000000036 +207.7845 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 21:31:46,015 INFO [tetris] Final score: 239 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:31:47,054 INFO [tetris] Final score: 210 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:31:47,822 INFO [tetris] Final score: 159 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:31:49,214 INFO [tetris] Final score: 252 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 21:31:50,158 INFO [tetris] Final score: 219 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 21:31:51,295 INFO [tetris] Final score: 246 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 21:31:53,471 INFO [tetris] Final score: 294 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:31:54,703 INFO [tetris] Final score: 306 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:31:55,790 INFO [tetris] Final score: 236 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:31:56,895 INFO [tetris] Final score: 195 diff --git a/qlearning-results/a0.7-g0.5-e0.9-qlearning b/qlearning-results/a0.7-g0.5-e0.9-qlearning new file mode 100644 index 0000000..40fa1ac --- /dev/null +++ b/qlearning-results/a0.7-g0.5-e0.9-qlearning @@ -0,0 +1,30 @@ +2020-04-21 04:37:58,380 INFO [tetris::actors::qlearning] Training an actor with 
learning_rate = 0.7, discount_rate = 0.5, exploration_rate = 0.9 +223.66915000000006 +222.5219500000002 +222.25485000000012 +221.88305000000136 +221.82749999999987 +222.20430000000144 +221.796850000001 +221.66300000000106 +221.49145000000166 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 04:44:58,347 INFO [tetris] Final score: 256 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 04:45:00,043 INFO [tetris] Final score: 189 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 04:45:01,451 INFO [tetris] Final score: 175 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 04:45:02,651 INFO [tetris] Final score: 276 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 04:45:04,108 INFO [tetris] Final score: 203 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 04:45:05,355 INFO [tetris] Final score: 190 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 04:45:06,539 INFO [tetris] Final score: 216 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 04:45:08,011 INFO [tetris] Final score: 248 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 04:45:09,259 INFO [tetris] Final score: 175 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 04:45:09,995 INFO [tetris] Final score: 192 diff --git a/qlearning-results/a0.7-g0.9-e0.1-approximateqlearning b/qlearning-results/a0.7-g0.9-e0.1-approximateqlearning new file mode 100644 index 0000000..ac837ca --- /dev/null +++ b/qlearning-results/a0.7-g0.9-e0.1-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 21:25:29,561 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 0.9, exploration_rate = 0.1 +203.17500000000055 +203.03650000000044 +202.6135000000002 +203.43050000000056 +202.74100000000027 +229.90399999999988 +215.91400000000124 +203.10550000000046 +203.25400000000053 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:26:06,225 INFO [tetris] Final score: 190 +Lost due to: 
BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:26:06,417 INFO [tetris] Final score: 202 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:26:06,610 INFO [tetris] Final score: 192 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:26:06,818 INFO [tetris] Final score: 210 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:26:07,041 INFO [tetris] Final score: 198 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-20 21:26:07,313 INFO [tetris] Final score: 204 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:26:07,490 INFO [tetris] Final score: 202 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:26:07,681 INFO [tetris] Final score: 218 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:26:07,873 INFO [tetris] Final score: 228 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 21:26:08,049 INFO [tetris] Final score: 210 diff --git a/qlearning-results/a0.7-g0.9-e0.1-qlearning b/qlearning-results/a0.7-g0.9-e0.1-qlearning new file mode 100644 index 0000000..c5c61a4 --- /dev/null +++ b/qlearning-results/a0.7-g0.9-e0.1-qlearning @@ -0,0 +1,30 @@ +2020-04-21 03:25:00,944 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 0.9, exploration_rate = 0.1 +215.13885000000053 +211.81050000000175 +210.32375000000297 +209.97575000000114 +209.49600000000055 +209.04845000000097 +208.89100000000022 +208.95150000000052 +209.1011000000007 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-21 03:32:35,860 INFO [tetris] Final score: 182 +Lost due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-21 03:32:37,332 INFO [tetris] Final score: 191 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 03:32:38,468 INFO [tetris] Final score: 203 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:32:39,492 INFO [tetris] Final score: 182 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:32:40,260 INFO [tetris] Final 
score: 245 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:32:40,996 INFO [tetris] Final score: 215 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-21 03:32:41,444 INFO [tetris] Final score: 168 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:32:42,276 INFO [tetris] Final score: 224 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-21 03:32:43,124 INFO [tetris] Final score: 244 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 03:32:44,228 INFO [tetris] Final score: 259 diff --git a/qlearning-results/a0.7-g0.9-e0.25-approximateqlearning b/qlearning-results/a0.7-g0.9-e0.25-approximateqlearning new file mode 100644 index 0000000..5c081b4 --- /dev/null +++ b/qlearning-results/a0.7-g0.9-e0.25-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 21:26:08,065 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 0.9, exploration_rate = 0.25 +199.73100000000008 +200.95750000000007 +200.71950000000038 +200.4030000000002 +204.4489999999996 +202.65200000000038 +200.87399999999997 +199.7300000000001 +205.55450000000008 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 21:26:43,259 INFO [tetris] Final score: 213 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:26:44,267 INFO [tetris] Final score: 235 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:26:45,963 INFO [tetris] Final score: 256 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:26:46,827 INFO [tetris] Final score: 165 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:26:48,506 INFO [tetris] Final score: 241 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:26:49,146 INFO [tetris] Final score: 207 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:26:49,706 INFO [tetris] Final score: 139 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:26:50,202 INFO [tetris] Final score: 161 +Lost due to: BlockOut(Position { x: 
4, y: 20 }) +2020-04-20 21:26:52,458 INFO [tetris] Final score: 249 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:26:54,395 INFO [tetris] Final score: 308 diff --git a/qlearning-results/a0.7-g0.9-e0.25-qlearning b/qlearning-results/a0.7-g0.9-e0.25-qlearning new file mode 100644 index 0000000..0bd9358 --- /dev/null +++ b/qlearning-results/a0.7-g0.9-e0.25-qlearning @@ -0,0 +1,30 @@ +2020-04-21 03:32:55,152 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 0.9, exploration_rate = 0.25 +222.45555000000095 +220.46840000000304 +219.28965000000224 +218.4454500000004 +218.04345000000154 +218.02265000000176 +217.59115000000156 +217.36425000000284 +217.25680000000168 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:40:59,401 INFO [tetris] Final score: 213 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:41:00,489 INFO [tetris] Final score: 258 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 03:41:01,289 INFO [tetris] Final score: 208 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 03:41:01,577 INFO [tetris] Final score: 183 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:41:02,057 INFO [tetris] Final score: 212 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:41:03,081 INFO [tetris] Final score: 269 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:41:03,897 INFO [tetris] Final score: 221 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:41:05,081 INFO [tetris] Final score: 196 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:41:06,553 INFO [tetris] Final score: 211 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:41:07,113 INFO [tetris] Final score: 198 diff --git a/qlearning-results/a0.7-g0.9-e0.5-approximateqlearning b/qlearning-results/a0.7-g0.9-e0.5-approximateqlearning new file mode 100644 index 0000000..9b19fff --- /dev/null +++ 
b/qlearning-results/a0.7-g0.9-e0.5-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 21:26:54,409 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 0.9, exploration_rate = 0.5 +198.1325000000001 +198.59149999999997 +194.69349999999994 +195.7760000000001 +196.59150000000034 +195.17499999999973 +194.40199999999982 +195.71100000000047 +198.3695000000004 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 21:27:26,306 INFO [tetris] Final score: 257 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:27:27,507 INFO [tetris] Final score: 214 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 21:27:28,002 INFO [tetris] Final score: 186 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-20 21:27:29,042 INFO [tetris] Final score: 241 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:27:29,730 INFO [tetris] Final score: 164 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:27:30,947 INFO [tetris] Final score: 226 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:27:32,402 INFO [tetris] Final score: 246 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:27:33,251 INFO [tetris] Final score: 193 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:27:34,338 INFO [tetris] Final score: 204 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 21:27:35,634 INFO [tetris] Final score: 228 diff --git a/qlearning-results/a0.7-g0.9-e0.5-qlearning b/qlearning-results/a0.7-g0.9-e0.5-qlearning new file mode 100644 index 0000000..6a01ba2 --- /dev/null +++ b/qlearning-results/a0.7-g0.9-e0.5-qlearning @@ -0,0 +1,30 @@ +2020-04-21 03:41:19,797 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 0.9, exploration_rate = 0.5 +223.51380000000077 +222.1278000000015 +221.6259000000018 +220.6082500000004 +220.54835000000202 +220.51770000000144 +220.31765000000223 +220.23355000000177 +219.9084500000021 +Lost 
due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-21 03:49:11,930 INFO [tetris] Final score: 214 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:49:13,243 INFO [tetris] Final score: 255 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 03:49:13,754 INFO [tetris] Final score: 217 +Lost due to: LockOut +2020-04-21 03:49:14,779 INFO [tetris] Final score: 235 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-21 03:49:17,387 INFO [tetris] Final score: 302 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:49:18,315 INFO [tetris] Final score: 170 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:49:19,483 INFO [tetris] Final score: 206 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-21 03:49:20,427 INFO [tetris] Final score: 233 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:49:21,643 INFO [tetris] Final score: 248 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:49:23,514 INFO [tetris] Final score: 342 diff --git a/qlearning-results/a0.7-g0.9-e0.75-approximateqlearning b/qlearning-results/a0.7-g0.9-e0.75-approximateqlearning new file mode 100644 index 0000000..d08616c --- /dev/null +++ b/qlearning-results/a0.7-g0.9-e0.75-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 21:27:35,647 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 0.9, exploration_rate = 0.75 +200.55050000000023 +199.57249999999993 +200.44399999999987 +199.94550000000052 +199.73450000000054 +199.9685000000002 +198.38999999999993 +197.7865000000001 +199.29950000000014 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:28:08,407 INFO [tetris] Final score: 239 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:28:09,319 INFO [tetris] Final score: 198 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:28:09,831 INFO [tetris] Final score: 207 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:28:10,823 INFO 
[tetris] Final score: 190 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:28:11,415 INFO [tetris] Final score: 182 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 21:28:12,423 INFO [tetris] Final score: 201 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:28:12,983 INFO [tetris] Final score: 178 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 21:28:14,231 INFO [tetris] Final score: 281 +Lost due to: LockOut +2020-04-20 21:28:14,919 INFO [tetris] Final score: 164 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 21:28:15,607 INFO [tetris] Final score: 220 diff --git a/qlearning-results/a0.7-g0.9-e0.75-qlearning b/qlearning-results/a0.7-g0.9-e0.75-qlearning new file mode 100644 index 0000000..91f6304 --- /dev/null +++ b/qlearning-results/a0.7-g0.9-e0.75-qlearning @@ -0,0 +1,30 @@ +2020-04-21 03:49:37,711 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 0.9, exploration_rate = 0.75 +224.23100000000096 +223.19695000000027 +222.8860500000018 +222.29845000000086 +222.98580000000118 +222.6816999999997 +222.5675000000006 +222.46140000000213 +222.3639000000002 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 03:57:09,901 INFO [tetris] Final score: 210 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-21 03:57:11,134 INFO [tetris] Final score: 196 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 03:57:13,581 INFO [tetris] Final score: 235 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:57:15,150 INFO [tetris] Final score: 180 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:57:15,934 INFO [tetris] Final score: 186 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-21 03:57:16,670 INFO [tetris] Final score: 212 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:57:17,518 INFO [tetris] Final score: 203 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:57:19,214 INFO 
[tetris] Final score: 204 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:57:20,333 INFO [tetris] Final score: 178 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:57:21,966 INFO [tetris] Final score: 297 diff --git a/qlearning-results/a0.7-g0.9-e0.9-approximateqlearning b/qlearning-results/a0.7-g0.9-e0.9-approximateqlearning new file mode 100644 index 0000000..f5d4308 --- /dev/null +++ b/qlearning-results/a0.7-g0.9-e0.9-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 21:28:15,621 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 0.9, exploration_rate = 0.9 +208.77800000000016 +208.0775000000002 +208.76200000000014 +207.2684999999999 +207.94550000000044 +207.4880000000006 +207.568 +208.248 +207.29050000000007 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:28:53,193 INFO [tetris] Final score: 219 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:28:53,944 INFO [tetris] Final score: 192 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:28:54,441 INFO [tetris] Final score: 176 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:28:55,369 INFO [tetris] Final score: 262 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:28:56,953 INFO [tetris] Final score: 257 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:28:57,785 INFO [tetris] Final score: 217 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 21:28:58,809 INFO [tetris] Final score: 176 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 21:28:59,656 INFO [tetris] Final score: 231 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:29:00,521 INFO [tetris] Final score: 159 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:29:01,096 INFO [tetris] Final score: 176 diff --git a/qlearning-results/a0.7-g0.9-e0.9-qlearning b/qlearning-results/a0.7-g0.9-e0.9-qlearning new file mode 100644 index 0000000..e116a47 --- 
/dev/null +++ b/qlearning-results/a0.7-g0.9-e0.9-qlearning @@ -0,0 +1,30 @@ +2020-04-21 03:57:38,040 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 0.9, exploration_rate = 0.9 +223.2869000000006 +223.9131500000013 +224.14165000000034 +223.62774999999982 +223.37350000000228 +223.36860000000013 +223.41860000000142 +222.8307000000011 +223.13380000000205 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 04:04:48,619 INFO [tetris] Final score: 273 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 04:04:49,771 INFO [tetris] Final score: 277 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 04:04:51,292 INFO [tetris] Final score: 284 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 04:04:52,475 INFO [tetris] Final score: 167 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 04:04:53,307 INFO [tetris] Final score: 180 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 04:04:54,155 INFO [tetris] Final score: 209 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 04:04:55,484 INFO [tetris] Final score: 181 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 04:04:57,211 INFO [tetris] Final score: 251 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 04:04:57,739 INFO [tetris] Final score: 195 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 04:04:58,891 INFO [tetris] Final score: 205 diff --git a/qlearning-results/a0.7-g1.0-e0.1-approximateqlearning b/qlearning-results/a0.7-g1.0-e0.1-approximateqlearning new file mode 100644 index 0000000..86abdd4 --- /dev/null +++ b/qlearning-results/a0.7-g1.0-e0.1-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 20:45:37,511 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 1, exploration_rate = 0.1 +335.5434999999998 +373.49600000000015 +371.8569999999996 +372.2030000000001 +370.8865000000002 +372.12649999999996 +373.30850000000004 +373.637 
+371.95349999999905 +Lost due to: LockOut +2020-04-20 21:07:59,961 INFO [tetris] Final score: 291 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 21:08:21,706 INFO [tetris] Final score: 346 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 21:08:40,282 INFO [tetris] Final score: 361 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 21:09:17,754 INFO [tetris] Final score: 408 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:09:42,217 INFO [tetris] Final score: 335 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 21:10:00,697 INFO [tetris] Final score: 306 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:10:30,041 INFO [tetris] Final score: 367 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:11:12,265 INFO [tetris] Final score: 491 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 21:11:33,082 INFO [tetris] Final score: 359 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:11:58,473 INFO [tetris] Final score: 314 diff --git a/qlearning-results/a0.7-g1.0-e0.1-qlearning b/qlearning-results/a0.7-g1.0-e0.1-qlearning new file mode 100644 index 0000000..7157b34 --- /dev/null +++ b/qlearning-results/a0.7-g1.0-e0.1-qlearning @@ -0,0 +1,30 @@ +2020-04-21 02:29:23,512 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 1, exploration_rate = 0.1 +218.13240000000118 +215.3771500000018 +215.7146000000007 +216.35280000000142 +215.86075000000187 +217.47790000000032 +215.4513000000002 +215.5094500000005 +215.9326499999996 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:44:48,480 INFO [tetris] Final score: 263 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:44:51,344 INFO [tetris] Final score: 242 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:44:56,448 INFO [tetris] Final score: 387 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-21 02:44:58,032 INFO [tetris] Final 
score: 229 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:44:59,936 INFO [tetris] Final score: 337 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:45:02,688 INFO [tetris] Final score: 200 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-21 02:45:04,705 INFO [tetris] Final score: 213 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-21 02:45:07,056 INFO [tetris] Final score: 290 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:45:09,777 INFO [tetris] Final score: 192 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 02:45:11,201 INFO [tetris] Final score: 178 diff --git a/qlearning-results/a0.7-g1.0-e0.25-approximateqlearning b/qlearning-results/a0.7-g1.0-e0.25-approximateqlearning new file mode 100644 index 0000000..b7c61d1 --- /dev/null +++ b/qlearning-results/a0.7-g1.0-e0.25-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 21:11:58,489 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 1, exploration_rate = 0.25 +274.52300000000014 +298.8889999999992 +298.89950000000033 +297.5165000000004 +295.3755 +296.84199999999936 +296.1189999999999 +299.6350000000002 +297.683 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 21:18:20,940 INFO [tetris] Final score: 276 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 21:18:31,212 INFO [tetris] Final score: 348 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 21:18:40,108 INFO [tetris] Final score: 314 +Lost due to: LockOut +2020-04-20 21:18:46,444 INFO [tetris] Final score: 305 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 21:18:57,964 INFO [tetris] Final score: 382 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:19:06,524 INFO [tetris] Final score: 281 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:19:11,356 INFO [tetris] Final score: 309 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:19:17,707 INFO [tetris] 
Final score: 321 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:19:23,452 INFO [tetris] Final score: 257 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:19:35,068 INFO [tetris] Final score: 338 diff --git a/qlearning-results/a0.7-g1.0-e0.25-qlearning b/qlearning-results/a0.7-g1.0-e0.25-qlearning new file mode 100644 index 0000000..51ac102 --- /dev/null +++ b/qlearning-results/a0.7-g1.0-e0.25-qlearning @@ -0,0 +1,30 @@ +2020-04-21 02:45:26,480 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 1, exploration_rate = 0.25 +219.72690000000298 +218.82865000000038 +219.32520000000136 +219.66200000000066 +219.37170000000083 +218.15705000000122 +218.2327500000009 +219.18330000000105 +217.24985000000268 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 02:57:34,042 INFO [tetris] Final score: 175 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 02:57:36,201 INFO [tetris] Final score: 297 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:57:37,322 INFO [tetris] Final score: 194 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:57:38,362 INFO [tetris] Final score: 205 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:57:40,233 INFO [tetris] Final score: 227 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:57:42,618 INFO [tetris] Final score: 210 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-21 02:57:43,834 INFO [tetris] Final score: 179 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:57:45,242 INFO [tetris] Final score: 248 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 02:57:46,650 INFO [tetris] Final score: 146 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-21 02:57:48,410 INFO [tetris] Final score: 231 diff --git a/qlearning-results/a0.7-g1.0-e0.5-approximateqlearning b/qlearning-results/a0.7-g1.0-e0.5-approximateqlearning new file mode 100644 index 0000000..2c835ac --- 
/dev/null +++ b/qlearning-results/a0.7-g1.0-e0.5-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 21:19:35,081 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 1, exploration_rate = 0.5 +247.90150000000008 +257.94699999999995 +258.18599999999975 +258.4640000000003 +256.9974999999999 +256.7455 +257.53100000000006 +257.3304999999997 +257.3244999999996 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-20 21:22:03,559 INFO [tetris] Final score: 240 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:22:08,008 INFO [tetris] Final score: 244 +Lost due to: LockOut +2020-04-20 21:22:09,656 INFO [tetris] Final score: 225 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:22:13,607 INFO [tetris] Final score: 276 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 21:22:16,183 INFO [tetris] Final score: 221 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:22:17,880 INFO [tetris] Final score: 230 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:22:20,407 INFO [tetris] Final score: 299 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:22:24,375 INFO [tetris] Final score: 209 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:22:29,303 INFO [tetris] Final score: 234 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:22:32,775 INFO [tetris] Final score: 237 diff --git a/qlearning-results/a0.7-g1.0-e0.5-qlearning b/qlearning-results/a0.7-g1.0-e0.5-qlearning new file mode 100644 index 0000000..185507f --- /dev/null +++ b/qlearning-results/a0.7-g1.0-e0.5-qlearning @@ -0,0 +1,30 @@ +2020-04-21 02:58:04,445 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 1, exploration_rate = 0.5 +221.81855000000067 +221.78095000000235 +220.20000000000195 +220.13229999999908 +220.20355000000052 +222.93500000000165 +219.11325000000022 +219.53685000000127 +218.94330000000096 +Lost due to: 
BlockOut(Position { x: 3, y: 19 }) +2020-04-21 03:07:51,774 INFO [tetris] Final score: 201 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:07:53,614 INFO [tetris] Final score: 259 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:07:55,374 INFO [tetris] Final score: 211 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:07:57,278 INFO [tetris] Final score: 240 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:07:58,814 INFO [tetris] Final score: 251 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:08:01,326 INFO [tetris] Final score: 208 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-21 03:08:02,750 INFO [tetris] Final score: 282 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-21 03:08:04,158 INFO [tetris] Final score: 202 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:08:05,854 INFO [tetris] Final score: 213 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:08:07,054 INFO [tetris] Final score: 257 diff --git a/qlearning-results/a0.7-g1.0-e0.75-approximateqlearning b/qlearning-results/a0.7-g1.0-e0.75-approximateqlearning new file mode 100644 index 0000000..8bc6929 --- /dev/null +++ b/qlearning-results/a0.7-g1.0-e0.75-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 21:22:32,784 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 1, exploration_rate = 0.75 +234.73200000000008 +235.39850000000052 +237.45550000000017 +236.32299999999995 +234.66250000000005 +236.61350000000024 +237.03899999999976 +237.18350000000044 +235.64149999999987 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:23:55,839 INFO [tetris] Final score: 230 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 21:23:57,504 INFO [tetris] Final score: 243 +Lost due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-20 21:23:59,280 INFO [tetris] Final score: 255 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 
21:24:01,055 INFO [tetris] Final score: 233 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:24:03,039 INFO [tetris] Final score: 248 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 21:24:04,495 INFO [tetris] Final score: 176 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 21:24:06,447 INFO [tetris] Final score: 269 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 21:24:07,615 INFO [tetris] Final score: 156 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:24:10,431 INFO [tetris] Final score: 311 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:24:11,967 INFO [tetris] Final score: 198 diff --git a/qlearning-results/a0.7-g1.0-e0.75-qlearning b/qlearning-results/a0.7-g1.0-e0.75-qlearning new file mode 100644 index 0000000..0b4734d --- /dev/null +++ b/qlearning-results/a0.7-g1.0-e0.75-qlearning @@ -0,0 +1,30 @@ +2020-04-21 03:08:23,678 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 1, exploration_rate = 0.75 +223.0385500000023 +222.11090000000146 +221.80540000000062 +221.5786500000007 +221.59845000000067 +221.54075000000057 +221.12695000000113 +221.47705000000155 +221.25295000000023 +Lost due to: LockOut +2020-04-21 03:16:35,363 INFO [tetris] Final score: 216 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:16:37,348 INFO [tetris] Final score: 248 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:16:39,155 INFO [tetris] Final score: 257 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:16:42,483 INFO [tetris] Final score: 310 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:16:43,075 INFO [tetris] Final score: 143 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:16:45,043 INFO [tetris] Final score: 243 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:16:46,131 INFO [tetris] Final score: 220 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-21 
03:16:47,939 INFO [tetris] Final score: 254 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:16:49,827 INFO [tetris] Final score: 286 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:16:51,635 INFO [tetris] Final score: 253 diff --git a/qlearning-results/a0.7-g1.0-e0.9-approximateqlearning b/qlearning-results/a0.7-g1.0-e0.9-approximateqlearning new file mode 100644 index 0000000..d57245b --- /dev/null +++ b/qlearning-results/a0.7-g1.0-e0.9-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 21:24:11,981 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 1, exploration_rate = 0.9 +227.1814999999998 +227.54699999999985 +227.98299999999986 +228.55650000000023 +227.5055000000001 +227.2115000000003 +226.91000000000008 +229.05350000000018 +228.59800000000052 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 21:25:16,620 INFO [tetris] Final score: 239 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 21:25:17,452 INFO [tetris] Final score: 186 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:25:19,388 INFO [tetris] Final score: 245 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 21:25:20,940 INFO [tetris] Final score: 211 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 21:25:22,220 INFO [tetris] Final score: 189 +Lost due to: LockOut +2020-04-20 21:25:23,996 INFO [tetris] Final score: 242 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:25:25,101 INFO [tetris] Final score: 157 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-20 21:25:25,932 INFO [tetris] Final score: 182 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:25:27,452 INFO [tetris] Final score: 216 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 21:25:29,548 INFO [tetris] Final score: 220 diff --git a/qlearning-results/a0.7-g1.0-e0.9-qlearning b/qlearning-results/a0.7-g1.0-e0.9-qlearning new file mode 100644 index 
0000000..85661d8 --- /dev/null +++ b/qlearning-results/a0.7-g1.0-e0.9-qlearning @@ -0,0 +1,30 @@ +2020-04-21 03:17:08,898 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.7, discount_rate = 1, exploration_rate = 0.9 +223.7244000000009 +223.32210000000117 +223.4059000000003 +223.3708500000003 +223.1680000000011 +223.35150000000203 +223.01960000000196 +223.27850000000092 +223.0071500000017 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:24:30,404 INFO [tetris] Final score: 195 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:24:31,636 INFO [tetris] Final score: 161 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:24:33,380 INFO [tetris] Final score: 228 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:24:34,500 INFO [tetris] Final score: 162 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:24:36,372 INFO [tetris] Final score: 249 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-21 03:24:37,924 INFO [tetris] Final score: 222 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:24:39,044 INFO [tetris] Final score: 219 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-21 03:24:40,148 INFO [tetris] Final score: 176 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 03:24:41,219 INFO [tetris] Final score: 183 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-21 03:24:43,411 INFO [tetris] Final score: 205 diff --git a/qlearning-results/a0.9-g0.1-e0.1-approximateqlearning b/qlearning-results/a0.9-g0.1-e0.1-approximateqlearning new file mode 100644 index 0000000..e7c4005 --- /dev/null +++ b/qlearning-results/a0.9-g0.1-e0.1-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 22:21:38,649 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 0.1, exploration_rate = 0.1 +204.72000000000017 +204.9555000000002 +204.73150000000055 +204.15749999999997 +203.75300000000044 +203.68850000000003 +204.8665 
+205.62800000000075 +204.5355000000006 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 22:22:11,912 INFO [tetris] Final score: 168 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:22:12,296 INFO [tetris] Final score: 219 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:22:12,472 INFO [tetris] Final score: 202 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:22:12,680 INFO [tetris] Final score: 174 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:22:12,920 INFO [tetris] Final score: 160 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:22:13,176 INFO [tetris] Final score: 207 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 22:22:13,337 INFO [tetris] Final score: 160 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:22:14,088 INFO [tetris] Final score: 213 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:22:14,360 INFO [tetris] Final score: 200 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 22:22:14,553 INFO [tetris] Final score: 184 diff --git a/qlearning-results/a0.9-g0.1-e0.1-qlearning b/qlearning-results/a0.9-g0.1-e0.1-qlearning new file mode 100644 index 0000000..d59d1e4 --- /dev/null +++ b/qlearning-results/a0.9-g0.1-e0.1-qlearning @@ -0,0 +1,30 @@ +2020-04-21 07:44:28,713 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 0.1, exploration_rate = 0.1 +220.79705000000098 +217.88625000000195 +216.9815500000009 +216.4122000000027 +216.27310000000222 +216.22095000000283 +215.41645000000128 +215.9998000000015 +215.62740000000082 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:52:07,247 INFO [tetris] Final score: 217 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:52:07,982 INFO [tetris] Final score: 240 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:52:08,703 INFO [tetris] Final score: 173 +Lost due to: BlockOut(Position { x: 4, y: 20 
}) +2020-04-21 07:52:09,471 INFO [tetris] Final score: 207 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-21 07:52:10,223 INFO [tetris] Final score: 211 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:52:11,039 INFO [tetris] Final score: 240 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-21 07:52:11,583 INFO [tetris] Final score: 203 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:52:12,638 INFO [tetris] Final score: 251 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 07:52:13,262 INFO [tetris] Final score: 220 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:52:13,727 INFO [tetris] Final score: 198 diff --git a/qlearning-results/a0.9-g0.1-e0.25-approximateqlearning b/qlearning-results/a0.9-g0.1-e0.25-approximateqlearning new file mode 100644 index 0000000..00e88ca --- /dev/null +++ b/qlearning-results/a0.9-g0.1-e0.25-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 22:22:14,565 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 0.1, exploration_rate = 0.25 +198.19049999999933 +195.8015000000002 +195.93800000000024 +196.81100000000052 +196.83000000000018 +196.78100000000015 +197.0014999999999 +196.39300000000034 +196.53350000000043 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 22:22:42,136 INFO [tetris] Final score: 164 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:22:42,456 INFO [tetris] Final score: 215 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-20 22:22:42,712 INFO [tetris] Final score: 206 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:22:43,000 INFO [tetris] Final score: 186 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:22:43,256 INFO [tetris] Final score: 175 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:22:43,656 INFO [tetris] Final score: 177 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:22:43,864 INFO [tetris] Final 
score: 173 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 22:22:44,169 INFO [tetris] Final score: 167 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:22:44,584 INFO [tetris] Final score: 209 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:22:44,840 INFO [tetris] Final score: 146 diff --git a/qlearning-results/a0.9-g0.1-e0.25-qlearning b/qlearning-results/a0.9-g0.1-e0.25-qlearning new file mode 100644 index 0000000..3df0b91 --- /dev/null +++ b/qlearning-results/a0.9-g0.1-e0.25-qlearning @@ -0,0 +1,30 @@ +2020-04-21 07:52:25,240 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 0.1, exploration_rate = 0.25 +221.69755000000154 +219.747200000001 +218.60875000000107 +217.57425000000174 +218.1184000000003 +217.3527500000013 +216.99265000000088 +217.02095000000267 +216.84095000000266 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 08:00:38,274 INFO [tetris] Final score: 175 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 08:00:39,074 INFO [tetris] Final score: 211 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 08:00:40,306 INFO [tetris] Final score: 253 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 08:00:40,818 INFO [tetris] Final score: 147 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 08:00:41,890 INFO [tetris] Final score: 278 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 08:00:42,530 INFO [tetris] Final score: 211 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 08:00:44,018 INFO [tetris] Final score: 222 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 08:00:44,658 INFO [tetris] Final score: 192 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-21 08:00:46,370 INFO [tetris] Final score: 196 +Lost due to: LockOut +2020-04-21 08:00:46,770 INFO [tetris] Final score: 182 diff --git a/qlearning-results/a0.9-g0.1-e0.5-approximateqlearning 
b/qlearning-results/a0.9-g0.1-e0.5-approximateqlearning new file mode 100644 index 0000000..b9e13b4 --- /dev/null +++ b/qlearning-results/a0.9-g0.1-e0.5-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 22:22:44,846 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 0.1, exploration_rate = 0.5 +194.55000000000032 +192.75949999999997 +193.3220000000002 +193.76050000000026 +193.4004999999997 +194.7199999999996 +193.88100000000017 +192.56600000000014 +193.9615 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:23:12,757 INFO [tetris] Final score: 175 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:23:13,301 INFO [tetris] Final score: 237 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 22:23:13,589 INFO [tetris] Final score: 152 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:23:14,053 INFO [tetris] Final score: 164 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:23:14,724 INFO [tetris] Final score: 198 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:23:15,140 INFO [tetris] Final score: 187 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:23:15,428 INFO [tetris] Final score: 137 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:23:15,764 INFO [tetris] Final score: 175 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 22:23:16,149 INFO [tetris] Final score: 147 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:23:16,405 INFO [tetris] Final score: 164 diff --git a/qlearning-results/a0.9-g0.1-e0.5-qlearning b/qlearning-results/a0.9-g0.1-e0.5-qlearning new file mode 100644 index 0000000..b1991ac --- /dev/null +++ b/qlearning-results/a0.9-g0.1-e0.5-qlearning @@ -0,0 +1,30 @@ +2020-04-21 08:01:00,127 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 0.1, exploration_rate = 0.5 +222.4513500000001 +220.3924500000006 +219.65910000000176 
+219.38385000000184 +218.56560000000067 +218.46385000000154 +218.31455000000076 +217.92555000000132 +217.9842500000028 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-21 08:08:57,454 INFO [tetris] Final score: 259 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 08:08:58,830 INFO [tetris] Final score: 188 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 08:09:00,238 INFO [tetris] Final score: 257 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 08:09:01,742 INFO [tetris] Final score: 220 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 08:09:02,718 INFO [tetris] Final score: 207 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 08:09:03,566 INFO [tetris] Final score: 166 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 08:09:04,942 INFO [tetris] Final score: 225 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 08:09:06,430 INFO [tetris] Final score: 163 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 08:09:07,022 INFO [tetris] Final score: 176 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 08:09:08,286 INFO [tetris] Final score: 250 diff --git a/qlearning-results/a0.9-g0.1-e0.75-approximateqlearning b/qlearning-results/a0.9-g0.1-e0.75-approximateqlearning new file mode 100644 index 0000000..fc183aa --- /dev/null +++ b/qlearning-results/a0.9-g0.1-e0.75-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 22:23:16,415 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 0.1, exploration_rate = 0.75 +199.29450000000026 +199.06850000000006 +199.58450000000042 +199.93000000000043 +199.13649999999953 +199.01300000000015 +198.7224999999995 +199.10850000000022 +198.56250000000034 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 22:23:49,990 INFO [tetris] Final score: 198 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 22:23:50,951 INFO [tetris] Final score: 210 +Lost due to: BlockOut(Position { 
x: 5, y: 20 }) +2020-04-20 22:23:51,670 INFO [tetris] Final score: 216 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:23:52,583 INFO [tetris] Final score: 244 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:23:53,446 INFO [tetris] Final score: 249 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:23:53,878 INFO [tetris] Final score: 198 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 22:23:54,726 INFO [tetris] Final score: 178 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:23:55,462 INFO [tetris] Final score: 213 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:23:56,519 INFO [tetris] Final score: 246 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:23:57,607 INFO [tetris] Final score: 235 diff --git a/qlearning-results/a0.9-g0.1-e0.75-qlearning b/qlearning-results/a0.9-g0.1-e0.75-qlearning new file mode 100644 index 0000000..bf2d936 --- /dev/null +++ b/qlearning-results/a0.9-g0.1-e0.75-qlearning @@ -0,0 +1,30 @@ +2020-04-21 08:09:23,342 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 0.1, exploration_rate = 0.75 +222.67870000000124 +221.47485000000142 +220.88229999999945 +220.53305000000125 +220.33230000000083 +220.3560000000007 +219.8657500000003 +219.88295000000113 +219.44385000000062 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 08:16:45,716 INFO [tetris] Final score: 174 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 08:16:47,684 INFO [tetris] Final score: 298 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 08:16:48,835 INFO [tetris] Final score: 188 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 08:16:49,972 INFO [tetris] Final score: 186 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-21 08:16:51,204 INFO [tetris] Final score: 202 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 08:16:51,812 INFO [tetris] Final score: 178 +Lost due 
to: BlockOut(Position { x: 3, y: 19 }) +2020-04-21 08:16:52,852 INFO [tetris] Final score: 193 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 08:16:53,972 INFO [tetris] Final score: 194 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 08:16:55,396 INFO [tetris] Final score: 214 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 08:16:56,452 INFO [tetris] Final score: 207 diff --git a/qlearning-results/a0.9-g0.1-e0.9-approximateqlearning b/qlearning-results/a0.9-g0.1-e0.9-approximateqlearning new file mode 100644 index 0000000..dfa4a58 --- /dev/null +++ b/qlearning-results/a0.9-g0.1-e0.9-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 22:23:57,621 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 0.1, exploration_rate = 0.9 +207.0640000000004 +210.66100000000017 +208.24550000000045 +208.43600000000004 +208.50150000000025 +209.06300000000041 +209.48950000000042 +208.91000000000028 +209.3545000000003 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:24:39,721 INFO [tetris] Final score: 204 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:24:41,017 INFO [tetris] Final score: 278 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-20 22:24:42,696 INFO [tetris] Final score: 252 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:24:43,913 INFO [tetris] Final score: 235 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 22:24:44,633 INFO [tetris] Final score: 154 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:24:45,865 INFO [tetris] Final score: 226 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 22:24:46,681 INFO [tetris] Final score: 146 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:24:47,497 INFO [tetris] Final score: 194 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:24:48,824 INFO [tetris] Final score: 258 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 
22:24:49,624 INFO [tetris] Final score: 199 diff --git a/qlearning-results/a0.9-g0.1-e0.9-qlearning b/qlearning-results/a0.9-g0.1-e0.9-qlearning new file mode 100644 index 0000000..fa68735 --- /dev/null +++ b/qlearning-results/a0.9-g0.1-e0.9-qlearning @@ -0,0 +1,30 @@ +2020-04-21 08:17:12,786 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 0.1, exploration_rate = 0.9 +223.35630000000114 +222.6021500000007 +222.02245000000002 +222.3257000000021 +222.35770000000105 +221.9555500000002 +222.12560000000096 +221.61450000000048 +221.706050000001 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 08:24:12,613 INFO [tetris] Final score: 204 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 08:24:13,605 INFO [tetris] Final score: 194 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 08:24:15,238 INFO [tetris] Final score: 195 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 08:24:16,454 INFO [tetris] Final score: 181 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 08:24:18,038 INFO [tetris] Final score: 236 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 08:24:18,981 INFO [tetris] Final score: 208 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 08:24:20,533 INFO [tetris] Final score: 286 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-21 08:24:22,229 INFO [tetris] Final score: 167 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 08:24:23,558 INFO [tetris] Final score: 212 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 08:24:24,422 INFO [tetris] Final score: 217 diff --git a/qlearning-results/a0.9-g0.5-e0.1-approximateqlearning b/qlearning-results/a0.9-g0.5-e0.1-approximateqlearning new file mode 100644 index 0000000..b5a31c7 --- /dev/null +++ b/qlearning-results/a0.9-g0.5-e0.1-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 22:18:16,715 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, 
discount_rate = 0.5, exploration_rate = 0.1 +204.24050000000017 +203.9764999999998 +203.46550000000076 +204.50350000000043 +203.4560000000008 +203.43850000000006 +205.10550000000035 +204.1230000000002 +204.7925000000002 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:18:46,477 INFO [tetris] Final score: 241 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:18:46,974 INFO [tetris] Final score: 206 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:18:48,894 INFO [tetris] Final score: 196 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:18:50,846 INFO [tetris] Final score: 278 +Lost due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-20 22:18:55,326 INFO [tetris] Final score: 253 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:18:58,749 INFO [tetris] Final score: 318 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 22:18:59,934 INFO [tetris] Final score: 224 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:19:00,478 INFO [tetris] Final score: 167 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 22:19:00,701 INFO [tetris] Final score: 216 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 22:19:02,878 INFO [tetris] Final score: 196 diff --git a/qlearning-results/a0.9-g0.5-e0.1-qlearning b/qlearning-results/a0.9-g0.5-e0.1-qlearning new file mode 100644 index 0000000..ae660c6 --- /dev/null +++ b/qlearning-results/a0.9-g0.5-e0.1-qlearning @@ -0,0 +1,30 @@ +2020-04-21 07:04:14,485 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 0.5, exploration_rate = 0.1 +220.1491000000026 +218.15004999999934 +217.1538500000025 +216.49000000000188 +216.699900000002 +216.08380000000165 +216.4129500000018 +215.53930000000219 +215.56590000000202 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:11:57,769 INFO [tetris] Final score: 214 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 07:11:58,906 INFO 
[tetris] Final score: 228 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-21 07:11:59,450 INFO [tetris] Final score: 197 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:12:00,522 INFO [tetris] Final score: 240 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:12:02,074 INFO [tetris] Final score: 247 +Lost due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-21 07:12:03,353 INFO [tetris] Final score: 245 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:12:04,265 INFO [tetris] Final score: 224 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:12:04,906 INFO [tetris] Final score: 192 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-21 07:12:05,498 INFO [tetris] Final score: 192 +Lost due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-21 07:12:06,282 INFO [tetris] Final score: 223 diff --git a/qlearning-results/a0.9-g0.5-e0.25-approximateqlearning b/qlearning-results/a0.9-g0.5-e0.25-approximateqlearning new file mode 100644 index 0000000..4a0e6d2 --- /dev/null +++ b/qlearning-results/a0.9-g0.5-e0.25-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 22:19:02,890 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 0.5, exploration_rate = 0.25 +197.99099999999996 +197.66300000000044 +197.51550000000015 +197.4325000000003 +197.43349999999995 +196.97400000000013 +197.6255000000005 +197.77600000000027 +196.93750000000003 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:19:28,469 INFO [tetris] Final score: 207 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:19:28,757 INFO [tetris] Final score: 172 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:19:29,286 INFO [tetris] Final score: 211 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:19:29,750 INFO [tetris] Final score: 168 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:19:31,685 INFO [tetris] Final score: 291 +Lost due to: 
BlockOut(Position { x: 3, y: 20 }) +2020-04-20 22:19:32,134 INFO [tetris] Final score: 177 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 22:19:32,517 INFO [tetris] Final score: 185 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 22:19:32,869 INFO [tetris] Final score: 145 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:19:34,134 INFO [tetris] Final score: 203 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:19:34,485 INFO [tetris] Final score: 213 diff --git a/qlearning-results/a0.9-g0.5-e0.25-qlearning b/qlearning-results/a0.9-g0.5-e0.25-qlearning new file mode 100644 index 0000000..6492835 --- /dev/null +++ b/qlearning-results/a0.9-g0.5-e0.25-qlearning @@ -0,0 +1,30 @@ +2020-04-21 07:12:17,795 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 0.5, exploration_rate = 0.25 +221.4958000000015 +219.4804000000017 +218.46895000000163 +218.31290000000104 +217.58035000000186 +217.51680000000124 +216.92265000000071 +216.742700000001 +216.57269999999994 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:20:28,839 INFO [tetris] Final score: 185 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:20:29,816 INFO [tetris] Final score: 201 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:20:30,199 INFO [tetris] Final score: 220 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:20:31,400 INFO [tetris] Final score: 244 +Lost due to: LockOut +2020-04-21 07:20:32,247 INFO [tetris] Final score: 237 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:20:33,191 INFO [tetris] Final score: 229 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-21 07:20:33,687 INFO [tetris] Final score: 204 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:20:34,375 INFO [tetris] Final score: 221 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:20:35,735 INFO [tetris] Final score: 247 +Lost due to: 
BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:20:36,631 INFO [tetris] Final score: 228 diff --git a/qlearning-results/a0.9-g0.5-e0.5-approximateqlearning b/qlearning-results/a0.9-g0.5-e0.5-approximateqlearning new file mode 100644 index 0000000..b615034 --- /dev/null +++ b/qlearning-results/a0.9-g0.5-e0.5-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 22:19:34,500 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 0.5, exploration_rate = 0.5 +194.35150000000036 +193.85400000000044 +192.59300000000016 +192.44249999999963 +192.79150000000027 +194.87899999999942 +193.373 +193.26750000000007 +193.31949999999955 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:20:01,078 INFO [tetris] Final score: 173 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:20:01,798 INFO [tetris] Final score: 204 +Lost due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-20 22:20:02,694 INFO [tetris] Final score: 204 +Lost due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-20 22:20:03,109 INFO [tetris] Final score: 191 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:20:03,590 INFO [tetris] Final score: 184 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 22:20:04,150 INFO [tetris] Final score: 170 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:20:05,158 INFO [tetris] Final score: 173 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:20:06,086 INFO [tetris] Final score: 211 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:20:06,502 INFO [tetris] Final score: 172 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:20:07,046 INFO [tetris] Final score: 210 diff --git a/qlearning-results/a0.9-g0.5-e0.5-qlearning b/qlearning-results/a0.9-g0.5-e0.5-qlearning new file mode 100644 index 0000000..d85a54b --- /dev/null +++ b/qlearning-results/a0.9-g0.5-e0.5-qlearning @@ -0,0 +1,30 @@ +2020-04-21 07:20:49,939 INFO [tetris::actors::qlearning] 
Training an actor with learning_rate = 0.9, discount_rate = 0.5, exploration_rate = 0.5 +222.80135000000013 +220.52675000000102 +219.24725000000086 +218.94735000000048 +218.56145000000072 +219.08565000000195 +218.17790000000142 +217.92955000000103 +218.26210000000088 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:28:45,671 INFO [tetris] Final score: 196 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:28:46,023 INFO [tetris] Final score: 139 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:28:47,223 INFO [tetris] Final score: 223 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-21 07:28:47,959 INFO [tetris] Final score: 205 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:28:49,255 INFO [tetris] Final score: 264 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:28:50,039 INFO [tetris] Final score: 214 +Lost due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-21 07:28:51,479 INFO [tetris] Final score: 178 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 07:28:52,071 INFO [tetris] Final score: 209 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 07:28:52,871 INFO [tetris] Final score: 197 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:28:54,871 INFO [tetris] Final score: 250 diff --git a/qlearning-results/a0.9-g0.5-e0.75-approximateqlearning b/qlearning-results/a0.9-g0.5-e0.75-approximateqlearning new file mode 100644 index 0000000..4c8c0b0 --- /dev/null +++ b/qlearning-results/a0.9-g0.5-e0.75-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 22:20:07,059 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 0.5, exploration_rate = 0.75 +198.4479999999999 +199.46200000000002 +199.39050000000043 +199.45900000000015 +198.11249999999995 +198.2095000000001 +198.34499999999963 +198.5230000000004 +198.27000000000044 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 22:20:39,527 INFO [tetris] Final 
score: 209 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:20:40,328 INFO [tetris] Final score: 239 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:20:41,143 INFO [tetris] Final score: 176 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:20:41,703 INFO [tetris] Final score: 147 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:20:42,791 INFO [tetris] Final score: 136 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:20:43,895 INFO [tetris] Final score: 217 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:20:45,143 INFO [tetris] Final score: 247 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:20:45,511 INFO [tetris] Final score: 191 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 22:20:46,455 INFO [tetris] Final score: 226 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:20:47,831 INFO [tetris] Final score: 228 diff --git a/qlearning-results/a0.9-g0.5-e0.75-qlearning b/qlearning-results/a0.9-g0.5-e0.75-qlearning new file mode 100644 index 0000000..0e9295a --- /dev/null +++ b/qlearning-results/a0.9-g0.5-e0.75-qlearning @@ -0,0 +1,30 @@ +2020-04-21 07:29:09,845 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 0.5, exploration_rate = 0.75 +222.3914500000019 +221.23785000000152 +221.2070000000005 +220.99320000000148 +220.4524500000023 +220.212800000001 +219.4436000000009 +219.82130000000024 +219.6764500000008 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:36:31,455 INFO [tetris] Final score: 182 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 07:36:32,063 INFO [tetris] Final score: 145 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:36:33,248 INFO [tetris] Final score: 215 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:36:34,559 INFO [tetris] Final score: 174 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 07:36:35,775 
INFO [tetris] Final score: 216 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:36:37,055 INFO [tetris] Final score: 191 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:36:38,287 INFO [tetris] Final score: 239 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-21 07:36:39,472 INFO [tetris] Final score: 167 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:36:40,543 INFO [tetris] Final score: 250 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 07:36:42,127 INFO [tetris] Final score: 235 diff --git a/qlearning-results/a0.9-g0.5-e0.9-approximateqlearning b/qlearning-results/a0.9-g0.5-e0.9-approximateqlearning new file mode 100644 index 0000000..e5236ad --- /dev/null +++ b/qlearning-results/a0.9-g0.5-e0.9-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 22:20:47,839 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 0.5, exploration_rate = 0.9 +208.04999999999995 +208.609 +207.78050000000042 +208.00000000000014 +210.13800000000018 +210.38000000000008 +208.56700000000032 +209.25849999999997 +208.56649999999988 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 22:21:28,270 INFO [tetris] Final score: 194 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:21:29,646 INFO [tetris] Final score: 193 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 22:21:30,510 INFO [tetris] Final score: 213 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:21:31,598 INFO [tetris] Final score: 222 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:21:32,478 INFO [tetris] Final score: 170 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 22:21:33,342 INFO [tetris] Final score: 212 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:21:35,166 INFO [tetris] Final score: 261 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-20 22:21:36,654 INFO [tetris] Final score: 226 +Lost due to: 
BlockOut(Position { x: 5, y: 20 }) +2020-04-20 22:21:37,870 INFO [tetris] Final score: 228 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:21:38,639 INFO [tetris] Final score: 201 diff --git a/qlearning-results/a0.9-g0.5-e0.9-qlearning b/qlearning-results/a0.9-g0.5-e0.9-qlearning new file mode 100644 index 0000000..63859f0 --- /dev/null +++ b/qlearning-results/a0.9-g0.5-e0.9-qlearning @@ -0,0 +1,30 @@ +2020-04-21 07:36:58,638 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 0.5, exploration_rate = 0.9 +223.6104500000002 +222.78740000000064 +221.90855 +222.23975000000084 +221.89455000000146 +222.18560000000093 +221.7105000000009 +221.57145000000057 +221.6627000000013 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:43:58,065 INFO [tetris] Final score: 247 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:44:00,017 INFO [tetris] Final score: 207 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:44:01,249 INFO [tetris] Final score: 228 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-21 07:44:02,400 INFO [tetris] Final score: 193 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:44:03,296 INFO [tetris] Final score: 146 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:44:05,073 INFO [tetris] Final score: 166 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:44:07,248 INFO [tetris] Final score: 292 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-21 07:44:08,000 INFO [tetris] Final score: 195 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:44:09,985 INFO [tetris] Final score: 277 +Lost due to: LockOut +2020-04-21 07:44:11,553 INFO [tetris] Final score: 196 diff --git a/qlearning-results/a0.9-g0.9-e0.1-approximateqlearning b/qlearning-results/a0.9-g0.9-e0.1-approximateqlearning new file mode 100644 index 0000000..3c56966 --- /dev/null +++ b/qlearning-results/a0.9-g0.9-e0.1-approximateqlearning 
@@ -0,0 +1,30 @@ +2020-04-20 22:14:45,133 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 0.9, exploration_rate = 0.1 +203.64300000000046 +203.43700000000038 +203.12250000000026 +203.01400000000035 +224.54000000000008 +223.46150000000014 +203.1155000000002 +202.57250000000056 +223.0430000000007 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:15:25,382 INFO [tetris] Final score: 204 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:15:25,558 INFO [tetris] Final score: 196 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:15:25,750 INFO [tetris] Final score: 194 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:15:25,958 INFO [tetris] Final score: 206 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:15:26,150 INFO [tetris] Final score: 200 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:15:26,326 INFO [tetris] Final score: 212 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:15:26,502 INFO [tetris] Final score: 184 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:15:26,710 INFO [tetris] Final score: 218 +Lost due to: LockOut +2020-04-20 22:15:26,902 INFO [tetris] Final score: 204 +Lost due to: LockOut +2020-04-20 22:15:27,111 INFO [tetris] Final score: 186 diff --git a/qlearning-results/a0.9-g0.9-e0.1-qlearning b/qlearning-results/a0.9-g0.9-e0.1-qlearning new file mode 100644 index 0000000..23ee316 --- /dev/null +++ b/qlearning-results/a0.9-g0.9-e0.1-qlearning @@ -0,0 +1,30 @@ +2020-04-21 06:23:37,096 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 0.9, exploration_rate = 0.1 +220.11135000000203 +217.2223000000016 +215.9231500000015 +215.70630000000173 +215.53240000000181 +215.14065000000238 +214.87240000000259 +214.9684999999994 +214.60090000000287 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-21 06:31:12,948 INFO [tetris] Final score: 214 +Lost due 
to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 06:31:13,604 INFO [tetris] Final score: 243 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 06:31:14,197 INFO [tetris] Final score: 187 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 06:31:14,740 INFO [tetris] Final score: 184 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 06:31:15,333 INFO [tetris] Final score: 208 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 06:31:16,068 INFO [tetris] Final score: 191 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 06:31:16,596 INFO [tetris] Final score: 190 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 06:31:17,477 INFO [tetris] Final score: 247 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-21 06:31:18,037 INFO [tetris] Final score: 158 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 06:31:18,820 INFO [tetris] Final score: 235 diff --git a/qlearning-results/a0.9-g0.9-e0.25-approximateqlearning b/qlearning-results/a0.9-g0.9-e0.25-approximateqlearning new file mode 100644 index 0000000..caf5bfc --- /dev/null +++ b/qlearning-results/a0.9-g0.9-e0.25-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 22:15:27,128 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 0.9, exploration_rate = 0.25 +199.9570000000003 +200.65800000000064 +200.2209999999998 +204.64649999999997 +202.45350000000005 +199.93400000000045 +202.02249999999927 +203.52300000000037 +201.23650000000026 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-20 22:15:56,578 INFO [tetris] Final score: 185 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:15:57,778 INFO [tetris] Final score: 193 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:15:59,475 INFO [tetris] Final score: 203 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:16:01,651 INFO [tetris] Final score: 236 +Lost due to: BlockOut(Position { x: 3, y: 20 }) 
+2020-04-20 22:16:02,130 INFO [tetris] Final score: 179 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 22:16:04,370 INFO [tetris] Final score: 190 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 22:16:05,074 INFO [tetris] Final score: 204 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 22:16:05,698 INFO [tetris] Final score: 164 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:16:06,754 INFO [tetris] Final score: 197 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:16:07,795 INFO [tetris] Final score: 192 diff --git a/qlearning-results/a0.9-g0.9-e0.25-qlearning b/qlearning-results/a0.9-g0.9-e0.25-qlearning new file mode 100644 index 0000000..52f9bc2 --- /dev/null +++ b/qlearning-results/a0.9-g0.9-e0.25-qlearning @@ -0,0 +1,30 @@ +2020-04-21 06:31:29,931 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 0.9, exploration_rate = 0.25 +222.0286000000032 +220.3080500000027 +219.79000000000102 +219.1303000000014 +218.95010000000215 +218.40280000000223 +218.49035000000254 +218.78120000000075 +218.15075000000294 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 06:39:35,215 INFO [tetris] Final score: 213 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-21 06:39:36,095 INFO [tetris] Final score: 183 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 06:39:37,791 INFO [tetris] Final score: 224 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 06:39:39,342 INFO [tetris] Final score: 195 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-21 06:39:39,935 INFO [tetris] Final score: 189 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 06:39:40,751 INFO [tetris] Final score: 220 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-21 06:39:42,606 INFO [tetris] Final score: 234 +Lost due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-21 06:39:43,183 INFO [tetris] Final score: 205 +Lost due to: 
BlockOut(Position { x: 4, y: 19 }) +2020-04-21 06:39:44,191 INFO [tetris] Final score: 218 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 06:39:44,911 INFO [tetris] Final score: 205 diff --git a/qlearning-results/a0.9-g0.9-e0.5-approximateqlearning b/qlearning-results/a0.9-g0.9-e0.5-approximateqlearning new file mode 100644 index 0000000..03b692c --- /dev/null +++ b/qlearning-results/a0.9-g0.9-e0.5-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 22:16:07,806 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 0.9, exploration_rate = 0.5 +197.50150000000022 +197.6740000000003 +194.29700000000022 +195.83249999999992 +195.94850000000008 +195.7474999999998 +196.79299999999984 +194.7880000000001 +194.89900000000006 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:16:44,941 INFO [tetris] Final score: 248 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:16:45,964 INFO [tetris] Final score: 172 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:16:46,781 INFO [tetris] Final score: 190 +Lost due to: LockOut +2020-04-20 22:16:47,613 INFO [tetris] Final score: 248 +Lost due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-20 22:16:48,653 INFO [tetris] Final score: 218 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:16:49,117 INFO [tetris] Final score: 169 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 22:16:49,836 INFO [tetris] Final score: 124 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:16:50,748 INFO [tetris] Final score: 200 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:16:51,709 INFO [tetris] Final score: 154 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:16:52,220 INFO [tetris] Final score: 164 diff --git a/qlearning-results/a0.9-g0.9-e0.5-qlearning b/qlearning-results/a0.9-g0.9-e0.5-qlearning new file mode 100644 index 0000000..53c127f --- /dev/null +++ 
b/qlearning-results/a0.9-g0.9-e0.5-qlearning @@ -0,0 +1,30 @@ +2020-04-21 06:39:57,620 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 0.9, exploration_rate = 0.5 +223.4274500000022 +222.68109999999945 +222.1315500000021 +221.90930000000327 +221.0418500000017 +221.2486500000039 +220.81480000000346 +220.83155000000053 +221.01915000000164 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 06:48:01,447 INFO [tetris] Final score: 240 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 06:48:03,240 INFO [tetris] Final score: 242 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 06:48:04,199 INFO [tetris] Final score: 225 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 06:48:05,335 INFO [tetris] Final score: 238 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 06:48:06,791 INFO [tetris] Final score: 175 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 06:48:07,191 INFO [tetris] Final score: 149 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 06:48:08,263 INFO [tetris] Final score: 175 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 06:48:09,511 INFO [tetris] Final score: 257 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 06:48:10,471 INFO [tetris] Final score: 183 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 06:48:11,848 INFO [tetris] Final score: 281 diff --git a/qlearning-results/a0.9-g0.9-e0.75-approximateqlearning b/qlearning-results/a0.9-g0.9-e0.75-approximateqlearning new file mode 100644 index 0000000..f08b67f --- /dev/null +++ b/qlearning-results/a0.9-g0.9-e0.75-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 22:16:52,233 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 0.9, exploration_rate = 0.75 +199.4635000000001 +199.6769999999999 +199.09749999999994 +199.1510000000005 +201.18500000000063 +198.3290000000002 +198.23250000000021 +199.40199999999984 
+199.57700000000008 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:17:24,323 INFO [tetris] Final score: 211 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 22:17:25,139 INFO [tetris] Final score: 190 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:17:25,556 INFO [tetris] Final score: 195 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:17:26,179 INFO [tetris] Final score: 208 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:17:26,915 INFO [tetris] Final score: 216 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 22:17:27,459 INFO [tetris] Final score: 209 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:17:28,003 INFO [tetris] Final score: 198 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:17:28,451 INFO [tetris] Final score: 187 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:17:29,395 INFO [tetris] Final score: 153 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:17:29,811 INFO [tetris] Final score: 199 diff --git a/qlearning-results/a0.9-g0.9-e0.75-qlearning b/qlearning-results/a0.9-g0.9-e0.75-qlearning new file mode 100644 index 0000000..2563fd2 --- /dev/null +++ b/qlearning-results/a0.9-g0.9-e0.75-qlearning @@ -0,0 +1,30 @@ +2020-04-21 06:48:26,417 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 0.9, exploration_rate = 0.75 +224.21695000000113 +223.70885000000177 +223.17375000000146 +223.02130000000142 +222.82329999999973 +222.55980000000082 +222.69185000000226 +222.44839999999962 +222.01355000000092 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 06:56:06,260 INFO [tetris] Final score: 220 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 06:56:07,652 INFO [tetris] Final score: 198 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-21 06:56:08,868 INFO [tetris] Final score: 206 +Lost due to: BlockOut(Position { x: 5, y: 20 }) 
+2020-04-21 06:56:10,548 INFO [tetris] Final score: 234 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-21 06:56:11,492 INFO [tetris] Final score: 204 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-21 06:56:12,613 INFO [tetris] Final score: 213 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 06:56:14,388 INFO [tetris] Final score: 209 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 06:56:15,348 INFO [tetris] Final score: 225 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-21 06:56:17,460 INFO [tetris] Final score: 233 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-21 06:56:18,517 INFO [tetris] Final score: 178 diff --git a/qlearning-results/a0.9-g0.9-e0.9-approximateqlearning b/qlearning-results/a0.9-g0.9-e0.9-approximateqlearning new file mode 100644 index 0000000..0088f09 --- /dev/null +++ b/qlearning-results/a0.9-g0.9-e0.9-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 22:17:29,828 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 0.9, exploration_rate = 0.9 +208.85200000000037 +207.66850000000048 +206.61900000000006 +206.74500000000037 +209.90700000000004 +206.0430000000001 +208.84249999999983 +207.04249999999982 +207.14950000000013 +Lost due to: BlockOut(Position { x: 6, y: 19 }) +2020-04-20 22:18:07,730 INFO [tetris] Final score: 287 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:18:08,882 INFO [tetris] Final score: 195 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:18:09,890 INFO [tetris] Final score: 217 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:18:11,123 INFO [tetris] Final score: 240 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 22:18:12,114 INFO [tetris] Final score: 209 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:18:12,914 INFO [tetris] Final score: 174 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:18:13,507 INFO [tetris] Final score: 
192 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:18:14,690 INFO [tetris] Final score: 252 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 22:18:15,971 INFO [tetris] Final score: 203 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 22:18:16,707 INFO [tetris] Final score: 192 diff --git a/qlearning-results/a0.9-g0.9-e0.9-qlearning b/qlearning-results/a0.9-g0.9-e0.9-qlearning new file mode 100644 index 0000000..801a890 --- /dev/null +++ b/qlearning-results/a0.9-g0.9-e0.9-qlearning @@ -0,0 +1,30 @@ +2020-04-21 06:56:34,782 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 0.9, exploration_rate = 0.9 +224.57260000000076 +223.3695500000009 +224.40370000000092 +224.05635000000188 +223.88540000000106 +224.18705000000156 +223.52310000000045 +223.81590000000122 +223.75850000000077 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:03:45,094 INFO [tetris] Final score: 241 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 07:03:46,486 INFO [tetris] Final score: 197 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:03:48,166 INFO [tetris] Final score: 229 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-21 07:03:50,102 INFO [tetris] Final score: 286 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:03:51,046 INFO [tetris] Final score: 190 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 07:03:51,830 INFO [tetris] Final score: 195 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:03:53,733 INFO [tetris] Final score: 287 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 07:03:54,597 INFO [tetris] Final score: 209 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 07:03:55,718 INFO [tetris] Final score: 206 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 07:03:57,270 INFO [tetris] Final score: 240 diff --git a/qlearning-results/a0.9-g1.0-e0.1-approximateqlearning 
b/qlearning-results/a0.9-g1.0-e0.1-approximateqlearning new file mode 100644 index 0000000..15d1d84 --- /dev/null +++ b/qlearning-results/a0.9-g1.0-e0.1-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 21:35:02,976 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 1, exploration_rate = 0.1 +343.0160000000001 +374.1515000000009 +371.97900000000055 +375.69750000000033 +377.08049999999946 +373.4639999999987 +374.8835000000006 +371.55649999999997 +373.81750000000017 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:57:40,169 INFO [tetris] Final score: 417 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 21:58:12,489 INFO [tetris] Final score: 428 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 21:58:28,729 INFO [tetris] Final score: 384 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:58:42,937 INFO [tetris] Final score: 295 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 21:59:11,369 INFO [tetris] Final score: 321 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 21:59:38,073 INFO [tetris] Final score: 317 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 21:59:56,201 INFO [tetris] Final score: 335 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 22:00:28,905 INFO [tetris] Final score: 437 +Lost due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-20 22:00:47,177 INFO [tetris] Final score: 341 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:01:11,673 INFO [tetris] Final score: 369 diff --git a/qlearning-results/a0.9-g1.0-e0.1-qlearning b/qlearning-results/a0.9-g1.0-e0.1-qlearning new file mode 100644 index 0000000..10e57da --- /dev/null +++ b/qlearning-results/a0.9-g1.0-e0.1-qlearning @@ -0,0 +1,30 @@ +2020-04-21 05:25:49,309 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 1, exploration_rate = 0.1 +219.4535000000012 +220.30670000000012 +222.87990000000056 
+224.77710000000152 +223.73930000000172 +222.64170000000138 +225.08065000000178 +225.3609500000005 +225.33345000000105 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 05:42:28,489 INFO [tetris] Final score: 207 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-21 05:42:30,185 INFO [tetris] Final score: 199 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 05:42:32,426 INFO [tetris] Final score: 287 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 05:42:35,433 INFO [tetris] Final score: 271 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-21 05:42:37,881 INFO [tetris] Final score: 211 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 05:42:39,145 INFO [tetris] Final score: 232 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-21 05:42:40,905 INFO [tetris] Final score: 207 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 05:42:43,513 INFO [tetris] Final score: 174 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-21 05:42:46,217 INFO [tetris] Final score: 197 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 05:42:48,633 INFO [tetris] Final score: 206 diff --git a/qlearning-results/a0.9-g1.0-e0.25-approximateqlearning b/qlearning-results/a0.9-g1.0-e0.25-approximateqlearning new file mode 100644 index 0000000..6a3c934 --- /dev/null +++ b/qlearning-results/a0.9-g1.0-e0.25-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 22:01:11,684 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 1, exploration_rate = 0.25 +280.8345000000005 +297.03800000000035 +296.62900000000025 +297.59599999999983 +299.1450000000002 +296.5395 +296.73250000000064 +296.8154999999996 +295.2065000000003 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:07:43,232 INFO [tetris] Final score: 266 +Lost due to: BlockOut(Position { x: 6, y: 19 }) +2020-04-20 22:07:49,328 INFO [tetris] Final score: 245 +Lost due to: BlockOut(Position { x: 3, y: 20 }) 
+2020-04-20 22:07:53,103 INFO [tetris] Final score: 265 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:07:58,143 INFO [tetris] Final score: 284 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:08:04,368 INFO [tetris] Final score: 250 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 22:08:09,695 INFO [tetris] Final score: 277 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:08:19,152 INFO [tetris] Final score: 353 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:08:30,304 INFO [tetris] Final score: 367 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:08:40,560 INFO [tetris] Final score: 374 +Lost due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-20 22:08:45,663 INFO [tetris] Final score: 290 diff --git a/qlearning-results/a0.9-g1.0-e0.25-qlearning b/qlearning-results/a0.9-g1.0-e0.25-qlearning new file mode 100644 index 0000000..5cfcf67 --- /dev/null +++ b/qlearning-results/a0.9-g1.0-e0.25-qlearning @@ -0,0 +1,30 @@ +2020-04-21 05:43:05,114 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 1, exploration_rate = 0.25 +221.42935000000287 +223.57179999999948 +222.4067000000011 +223.67315000000272 +222.3455500000012 +224.16825000000145 +223.38455000000144 +222.16165000000075 +222.65675000000027 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 05:55:46,939 INFO [tetris] Final score: 267 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 05:55:48,811 INFO [tetris] Final score: 245 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-21 05:55:49,899 INFO [tetris] Final score: 151 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 05:55:51,371 INFO [tetris] Final score: 216 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 05:55:52,796 INFO [tetris] Final score: 204 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 05:55:54,076 INFO [tetris] Final score: 250 +Lost due to: 
BlockOut(Position { x: 5, y: 20 }) +2020-04-21 05:55:55,323 INFO [tetris] Final score: 167 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 05:55:58,508 INFO [tetris] Final score: 296 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 05:56:00,491 INFO [tetris] Final score: 214 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 05:56:01,772 INFO [tetris] Final score: 246 diff --git a/qlearning-results/a0.9-g1.0-e0.5-approximateqlearning b/qlearning-results/a0.9-g1.0-e0.5-approximateqlearning new file mode 100644 index 0000000..e54c010 --- /dev/null +++ b/qlearning-results/a0.9-g1.0-e0.5-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 22:08:45,677 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 1, exploration_rate = 0.5 +250.91950000000043 +256.9545 +256.5619999999999 +257.13050000000015 +257.37300000000016 +256.96200000000056 +259.95699999999965 +255.26400000000027 +254.34550000000013 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:11:17,118 INFO [tetris] Final score: 235 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 22:11:19,645 INFO [tetris] Final score: 255 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 22:11:22,814 INFO [tetris] Final score: 312 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:11:26,253 INFO [tetris] Final score: 319 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:11:30,813 INFO [tetris] Final score: 225 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:11:33,229 INFO [tetris] Final score: 224 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:11:36,285 INFO [tetris] Final score: 232 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 22:11:37,870 INFO [tetris] Final score: 247 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:11:41,550 INFO [tetris] Final score: 276 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 22:11:44,846 
INFO [tetris] Final score: 253 diff --git a/qlearning-results/a0.9-g1.0-e0.5-qlearning b/qlearning-results/a0.9-g1.0-e0.5-qlearning new file mode 100644 index 0000000..f6e9f30 --- /dev/null +++ b/qlearning-results/a0.9-g1.0-e0.5-qlearning @@ -0,0 +1,30 @@ +2020-04-21 05:56:18,574 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 1, exploration_rate = 0.5 +223.14630000000096 +222.4662000000006 +222.19985000000216 +221.45400000000015 +221.35775000000123 +221.61499999999967 +221.8509499999996 +221.81945000000104 +220.82925000000114 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 06:06:21,197 INFO [tetris] Final score: 253 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 06:06:23,725 INFO [tetris] Final score: 247 +Lost due to: BlockOut(Position { x: 5, y: 19 }) +2020-04-21 06:06:24,509 INFO [tetris] Final score: 195 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 06:06:25,853 INFO [tetris] Final score: 254 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 06:06:27,149 INFO [tetris] Final score: 193 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-21 06:06:29,086 INFO [tetris] Final score: 262 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 06:06:30,397 INFO [tetris] Final score: 204 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 06:06:32,925 INFO [tetris] Final score: 227 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 06:06:35,469 INFO [tetris] Final score: 315 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 06:06:36,909 INFO [tetris] Final score: 200 diff --git a/qlearning-results/a0.9-g1.0-e0.75-approximateqlearning b/qlearning-results/a0.9-g1.0-e0.75-approximateqlearning new file mode 100644 index 0000000..dfb68a0 --- /dev/null +++ b/qlearning-results/a0.9-g1.0-e0.75-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 22:11:44,860 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, 
discount_rate = 1, exploration_rate = 0.75 +233.65150000000003 +235.6294999999998 +235.5679999999999 +237.1415 +236.70450000000005 +235.77050000000003 +235.82599999999977 +236.3489999999998 +235.507 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 22:13:07,975 INFO [tetris] Final score: 215 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-20 22:13:09,718 INFO [tetris] Final score: 256 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 22:13:11,542 INFO [tetris] Final score: 299 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:13:14,598 INFO [tetris] Final score: 270 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 22:13:16,630 INFO [tetris] Final score: 250 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:13:18,215 INFO [tetris] Final score: 229 +Lost due to: BlockOut(Position { x: 6, y: 19 }) +2020-04-20 22:13:20,711 INFO [tetris] Final score: 244 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:13:23,063 INFO [tetris] Final score: 256 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:13:24,806 INFO [tetris] Final score: 247 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 22:13:26,999 INFO [tetris] Final score: 278 diff --git a/qlearning-results/a0.9-g1.0-e0.75-qlearning b/qlearning-results/a0.9-g1.0-e0.75-qlearning new file mode 100644 index 0000000..fe4ff8d --- /dev/null +++ b/qlearning-results/a0.9-g1.0-e0.75-qlearning @@ -0,0 +1,30 @@ +2020-04-21 06:06:53,850 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 1, exploration_rate = 0.75 +223.03950000000032 +222.67955000000146 +222.60880000000236 +222.78549999999882 +222.02130000000193 +222.8154000000009 +222.58920000000154 +222.558650000001 +222.3293000000018 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-21 06:15:09,132 INFO [tetris] Final score: 207 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 06:15:10,556 INFO [tetris] Final 
score: 164 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 06:15:12,268 INFO [tetris] Final score: 237 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 06:15:13,724 INFO [tetris] Final score: 201 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 06:15:15,307 INFO [tetris] Final score: 238 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-21 06:15:18,812 INFO [tetris] Final score: 332 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 06:15:20,252 INFO [tetris] Final score: 170 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 06:15:22,140 INFO [tetris] Final score: 240 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 06:15:24,043 INFO [tetris] Final score: 261 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-21 06:15:25,772 INFO [tetris] Final score: 233 diff --git a/qlearning-results/a0.9-g1.0-e0.9-approximateqlearning b/qlearning-results/a0.9-g1.0-e0.9-approximateqlearning new file mode 100644 index 0000000..95f49f9 --- /dev/null +++ b/qlearning-results/a0.9-g1.0-e0.9-approximateqlearning @@ -0,0 +1,30 @@ +2020-04-20 22:13:27,013 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 1, exploration_rate = 0.9 +226.13749999999987 +227.05799999999994 +226.83550000000014 +227.24349999999953 +228.87400000000008 +227.81849999999966 +227.3485000000002 +227.08050000000034 +228.37950000000035 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:14:30,750 INFO [tetris] Final score: 206 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-20 22:14:32,110 INFO [tetris] Final score: 234 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:14:32,862 INFO [tetris] Final score: 166 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:14:35,134 INFO [tetris] Final score: 332 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:14:36,654 INFO [tetris] Final score: 250 +Lost due to: BlockOut(Position { x: 4, y: 
20 }) +2020-04-20 22:14:38,702 INFO [tetris] Final score: 172 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:14:39,807 INFO [tetris] Final score: 226 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:14:41,582 INFO [tetris] Final score: 207 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-20 22:14:43,502 INFO [tetris] Final score: 218 +Lost due to: BlockOut(Position { x: 3, y: 20 }) +2020-04-20 22:14:45,118 INFO [tetris] Final score: 286 diff --git a/qlearning-results/a0.9-g1.0-e0.9-qlearning b/qlearning-results/a0.9-g1.0-e0.9-qlearning new file mode 100644 index 0000000..152fd0e --- /dev/null +++ b/qlearning-results/a0.9-g1.0-e0.9-qlearning @@ -0,0 +1,30 @@ +2020-04-21 06:15:43,097 INFO [tetris::actors::qlearning] Training an actor with learning_rate = 0.9, discount_rate = 1, exploration_rate = 0.9 +224.08180000000112 +223.73260000000244 +223.56205000000148 +223.0561499999997 +223.4469000000014 +223.3565500000009 +223.6488000000011 +223.41785000000053 +223.7366000000004 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 06:23:07,871 INFO [tetris] Final score: 239 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 06:23:09,871 INFO [tetris] Final score: 214 +Lost due to: BlockOut(Position { x: 3, y: 19 }) +2020-04-21 06:23:11,823 INFO [tetris] Final score: 235 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 06:23:12,575 INFO [tetris] Final score: 179 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 06:23:13,647 INFO [tetris] Final score: 166 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 06:23:14,703 INFO [tetris] Final score: 234 +Lost due to: BlockOut(Position { x: 5, y: 20 }) +2020-04-21 06:23:15,454 INFO [tetris] Final score: 188 +Lost due to: BlockOut(Position { x: 4, y: 19 }) +2020-04-21 06:23:16,911 INFO [tetris] Final score: 225 +Lost due to: BlockOut(Position { x: 4, y: 20 }) +2020-04-21 06:23:18,271 INFO [tetris] Final score: 228 +Lost due to: 
BlockOut(Position { x: 4, y: 20 }) +2020-04-21 06:23:19,327 INFO [tetris] Final score: 186 diff --git a/qlearning-results/test b/qlearning-results/test new file mode 100755 index 0000000..4ebb3df --- /dev/null +++ b/qlearning-results/test @@ -0,0 +1,21 @@ +#!/usr/bin/env bash +set -euo pipefail + +echo "Warning, high epsilon values will take up to 30GB to run!" +echo "It is a good idea to disable these values." + +for a in 0.1 0.5 0.7 0.9; do + for g in 1.0 0.9 0.5 0.1; do + for e in 0.1 0.25 0.5 0.75 0.9; do + for agent in qlearning approximateqlearning; do + echo "alpha $a gamma $g epsilon $e agent $agent" + file="a$a-g$g-e$e-$agent" + if [ -f "$file" ]; then + echo "skipping $file, exists" + else + ./tetris train "$agent" -a "$a" -g "$g" -e "$e" -n 200000 >> "$file" + fi + done + done + done +done diff --git a/qlearning-results.tar b/qlearning-results/tetris old mode 100644 new mode 100755 similarity index 93% rename from qlearning-results.tar rename to qlearning-results/tetris index 837e58c..a811df8 Binary files a/qlearning-results.tar and b/qlearning-results/tetris differ