21 files changed, 222 insertions(+), 239 deletions(-)
M poetry.lock
M pyproject.toml
M sweep_ai/__main__.py
M sweep_ai/ai.py
R assets/Grid.png => sweep_ai/assets/Grid.png
R assets/empty.png => sweep_ai/assets/empty.png
R assets/flag.png => sweep_ai/assets/flag.png
R assets/grid1.png => sweep_ai/assets/grid1.png
R assets/grid2.png => sweep_ai/assets/grid2.png
R assets/grid3.png => sweep_ai/assets/grid3.png
R assets/grid4.png => sweep_ai/assets/grid4.png
R assets/grid5.png => sweep_ai/assets/grid5.png
R assets/grid6.png => sweep_ai/assets/grid6.png
R assets/grid7.png => sweep_ai/assets/grid7.png
R assets/grid8.png => sweep_ai/assets/grid8.png
R assets/hint.png => sweep_ai/assets/hint.png
R assets/mine.png => sweep_ai/assets/mine.png
D sweep_ai/config-feedforward
M sweep_ai/window.py
A sweep_ai/x.npy
A sweep_ai/y.npy
M poetry.lock => poetry.lock +21 -21
@@ 73,7 73,7 @@ python-versions = "*"
[[package]]
name = "charset-normalizer"
-version = "2.0.10"
+version = "2.0.11"
description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
category = "main"
optional = false
@@ 129,7 129,7 @@ woff = ["zopfli (>=0.1.4)", "brotlicffi (>=0.8.0)", "brotli (>=1.0.1)"]
[[package]]
name = "gast"
-version = "0.4.0"
+version = "0.5.3"
description = "Python AST that abstracts the underlying Python version"
category = "main"
optional = false
@@ 771,7 771,7 @@ optional = false
python-versions = "*"
[[package]]
-name = "tensorflow"
+name = "tensorflow-cpu"
version = "2.8.0rc1"
description = "TensorFlow is an open source machine learning framework for everyone."
category = "main"
@@ 917,7 917,7 @@ testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-
[metadata]
lock-version = "1.1"
python-versions = ">=3.8,<3.11"
-content-hash = "a8736cc0a5e18e65daf72234f2284f5d88807c703a13c612b9a7eb28ce966327"
+content-hash = "8501382b6faeb8e7e7ca4fca354ee7736ee2bccf39bb71f5b7c1ea15b9f1db31"
[metadata.files]
absl-py = [
@@ 949,8 949,8 @@ certifi = [
{file = "certifi-2021.10.8.tar.gz", hash = "sha256:78884e7c1d4b00ce3cea67b44566851c4343c120abd683433ce934a68ea58872"},
]
charset-normalizer = [
- {file = "charset-normalizer-2.0.10.tar.gz", hash = "sha256:876d180e9d7432c5d1dfd4c5d26b72f099d503e8fcc0feb7532c9289be60fcbd"},
- {file = "charset_normalizer-2.0.10-py3-none-any.whl", hash = "sha256:cb957888737fc0bbcd78e3df769addb41fd1ff8cf950dc9e7ad7793f1bf44455"},
+ {file = "charset-normalizer-2.0.11.tar.gz", hash = "sha256:98398a9d69ee80548c762ba991a4728bfc3836768ed226b3945908d1a688371c"},
+ {file = "charset_normalizer-2.0.11-py3-none-any.whl", hash = "sha256:2842d8f5e82a1f6aa437380934d5e1cd4fcf2003b06fed6940769c164a480a45"},
]
colorama = [
{file = "colorama-0.4.4-py2.py3-none-any.whl", hash = "sha256:9f47eda37229f68eee03b24b9748937c7dc3868f906e8ba69fbcbdd3bc5dc3e2"},
@@ 969,8 969,8 @@ fonttools = [
{file = "fonttools-4.29.0.zip", hash = "sha256:f4834250db2c9855c3385459579dbb5cdf74349ab059ea0e619359b65ae72037"},
]
gast = [
- {file = "gast-0.4.0-py3-none-any.whl", hash = "sha256:b7adcdd5adbebf1adf17378da5ba3f543684dbec47b1cda1f3997e573cd542c4"},
- {file = "gast-0.4.0.tar.gz", hash = "sha256:40feb7b8b8434785585ab224d1568b857edb18297e5a3047f1ba012bc83b42c1"},
+ {file = "gast-0.5.3-py3-none-any.whl", hash = "sha256:211aac1e58c167b25d3504998f2db694454a24bb1fb1225bce99420166f21d6a"},
+ {file = "gast-0.5.3.tar.gz", hash = "sha256:cfbea25820e653af9c7d1807f659ce0a0a9c64f2439421a7bba4f0983f532dea"},
]
google-auth = [
{file = "google-auth-2.5.0.tar.gz", hash = "sha256:6577bbf990ef342a24e12e0c8e9d364af6642acdf206c9045bdb8e039fb4fec9"},
@@ 1586,19 1586,19 @@ tensorboard-data-server = [
tensorboard-plugin-wit = [
{file = "tensorboard_plugin_wit-1.8.1-py3-none-any.whl", hash = "sha256:ff26bdd583d155aa951ee3b152b3d0cffae8005dc697f72b44a8e8c2a77a8cbe"},
]
-tensorflow = [
- {file = "tensorflow-2.8.0rc1-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:53a28fe262b1d859a7a0c139997e333f73f0b4ff3f196c85c171e93d2110fdf5"},
- {file = "tensorflow-2.8.0rc1-cp310-cp310-manylinux2010_x86_64.whl", hash = "sha256:cab5b637e21f45d35a0b904869eedd82775de53e5e0e96026f03fbf725a93821"},
- {file = "tensorflow-2.8.0rc1-cp310-cp310-win_amd64.whl", hash = "sha256:85e6435d8a8a0199e2c91311c7de11efcd885a3d3731da289a3b1b026d0bf9e1"},
- {file = "tensorflow-2.8.0rc1-cp37-cp37m-macosx_10_14_x86_64.whl", hash = "sha256:011d2638256a54adb28119faabbaefdd5c1cc1a1f8e9fc8a8e5ff02b5e321791"},
- {file = "tensorflow-2.8.0rc1-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:9807e3b35c54a1fa800a21cc99e4d1eb7444153e5a55c259a5f82787a003d81e"},
- {file = "tensorflow-2.8.0rc1-cp37-cp37m-win_amd64.whl", hash = "sha256:389366a8d0bc0a1581a390e35a4ee8d9e1f104973f977bcbf7b19cd076098706"},
- {file = "tensorflow-2.8.0rc1-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:2410f084bff865af9a6008f226585203424b9554cec2465754e1340110f114c9"},
- {file = "tensorflow-2.8.0rc1-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:713414453cb7d3194d2cf8b8e1b97fac965b9de4800df6a91c3eb9ab88905633"},
- {file = "tensorflow-2.8.0rc1-cp38-cp38-win_amd64.whl", hash = "sha256:a9849ce595db44c4fd7efc4c400f49174948bf618a69926d4ae9e27519d25c58"},
- {file = "tensorflow-2.8.0rc1-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:15e2fcb048bcf95789da7eb8faa47b63783ca84dcc619a36e1ea068403291514"},
- {file = "tensorflow-2.8.0rc1-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:a4badfe4bf37a51221b0fcd20f981f4c0df8a588665023376d25f79394424aae"},
- {file = "tensorflow-2.8.0rc1-cp39-cp39-win_amd64.whl", hash = "sha256:b7b0135a65bea7e2b6f7b21dfba3749920113a44d15cca882b6a767084720758"},
+tensorflow-cpu = [
+ {file = "tensorflow_cpu-2.8.0rc1-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:00e36cbfabd07a2e8241f5a054902e4c39c435303cc16d5b6a06ea5a25ee21c2"},
+ {file = "tensorflow_cpu-2.8.0rc1-cp310-cp310-manylinux2010_x86_64.whl", hash = "sha256:d583ec815420d613e8eaed3b690aad8e05af48bc567442e76f4e234c07233e97"},
+ {file = "tensorflow_cpu-2.8.0rc1-cp310-cp310-win_amd64.whl", hash = "sha256:0b30c8605cc34bfdc4071cffa0984938410d0966a6a02140f79c4fe63305cae5"},
+ {file = "tensorflow_cpu-2.8.0rc1-cp37-cp37m-macosx_10_14_x86_64.whl", hash = "sha256:e80244f66cffeb2bf17bf054d88660d54732d24fa903364dd106ddf1a9676cec"},
+ {file = "tensorflow_cpu-2.8.0rc1-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:906697c54cf5701765d13803922c7ef4b857a52438ccdea0de574a40064882e5"},
+ {file = "tensorflow_cpu-2.8.0rc1-cp37-cp37m-win_amd64.whl", hash = "sha256:d3bd6382d832abed841055b357011b615cc028404f1770762d7dd4d98d61115c"},
+ {file = "tensorflow_cpu-2.8.0rc1-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:9e511f7eb8b6e56406b2474dd3b9b0dc6583395b653c614f9addf717ef98565d"},
+ {file = "tensorflow_cpu-2.8.0rc1-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:919044865916381624e3688894a69c83a18437b250bd0fcc98989207d99ba0e1"},
+ {file = "tensorflow_cpu-2.8.0rc1-cp38-cp38-win_amd64.whl", hash = "sha256:df712e538bbc2e8a407db03fe5b2f45d5f229aaaf94dac091bbbfc5048bf02a0"},
+ {file = "tensorflow_cpu-2.8.0rc1-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:07b8720df5fbcfac4a921d316c95a21967e39efa4cfc2897e51046b1d2e9b556"},
+ {file = "tensorflow_cpu-2.8.0rc1-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:a159ba75f8126b1316546b8f468b56a6066e1aa5e53dc009f9b4962c73d7ae07"},
+ {file = "tensorflow_cpu-2.8.0rc1-cp39-cp39-win_amd64.whl", hash = "sha256:8f7fc7e5509dce26490bb5ee5c0488722d1f4f2f0c5a6c1d036c98dc1048ee82"},
]
tensorflow-io-gcs-filesystem = [
{file = "tensorflow_io_gcs_filesystem-0.23.1-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:80e2078b94ba5f140b5d366ee3b07b493760d2c76d7426ec417f7be2795a0799"},
M pyproject.toml => pyproject.toml +1 -1
@@ 9,9 9,9 @@ license = "MIT"
python = ">=3.8,<3.11"
pygame-menu = "^4.2.2"
numpy = "^1.22.0"
-tensorflow = "^2.8.0rc1"
pygame = "^2.1.2"
sklearn = "^0.0"
+tensorflow-cpu = "2.8.0rc1"
[tool.poetry.dev-dependencies]
yapf = "^0.32.0"
M sweep_ai/__main__.py => sweep_ai/__main__.py +32 -3
@@ 1,5 1,34 @@
"""Game entry point."""
-from .window import main
+from argparse import ArgumentParser
+from shutil import rmtree
-if __name__ == '__main__':
- main()
+from .ai import Player
+from .window import Game
+
+def main():
+ """Main gameplay entry point."""
+ parser = ArgumentParser(description='Minesweeper with AI hints')
+ parser.add_argument(
+ '--clean-cache',
+ action='store_true',
+ help=f'Clean the neural network cache dir (at {Player.CACHEDIR}).'
+ )
+ parser.add_argument(
+ '--no-cache',
+ action='store_true',
+ help='Disable network caching.'
+ )
+ parser.add_argument(
+ '--plot',
+ action='store_true',
+ help='Save loss plot after the network is trained.'
+ )
+ args = parser.parse_args()
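+    # Apply the CLI flags to Player's class-level settings before the game
+    # (and its neural network) is constructed.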
+ if args.clean_cache:
+ rmtree(Player.CACHEDIR, ignore_errors=True)
+ if args.no_cache:
+ Player.CACHE = False
+ if args.plot:
+ Player.PLOT = True
+ game = Game()
+ game.loop()
M sweep_ai/ai.py => sweep_ai/ai.py +136 -107
@@ 1,14 1,16 @@
"""Neural network functionality module."""
-from typing import Optional, Tuple
+import os
+from pathlib import Path
+from typing import Tuple
+
+# Must be set before TensorFlow is imported for the log level to take effect.
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+
 import numpy as np
 from matplotlib import pyplot as plt
-from sklearn.preprocessing import StandardScaler
-from sklearn.utils import class_weight, shuffle
 from tensorflow import keras
 from .logic import State
+
class Player:
"""The AI player, capable of making informed decisions.
@@ 17,134 19,161 @@ class Player:
brain: the neural network powering the decision-making process.
size: the size of the board on which the player has been trained.
"""
+ CACHEDIR = Path(
+ os.environ.get('SWEEP_BRAIN_CACHE')
+ or os.environ.get('XDG_CACHE_HOME') or '.',
+ 'sweep_brains',
+ 'conv',
+ )
+ CACHE = True
+ PLOT = False
+ X_TRAIN = Path(__file__).parent.joinpath('x.npy')
+ Y_TRAIN = Path(__file__).parent.joinpath('y.npy')
def __init__(self, size: int, difficulty: float):
"""Constructs a new `Player`."""
self.size = size
self.difficulty = difficulty
self.trained = False
- self.brain = keras.models.Sequential(
- [
- keras.layers.InputLayer(input_shape=(24, )),
- keras.layers.Dense(120, activation='relu'),
- keras.layers.Dense(60, activation='relu'),
- keras.layers.Dense(30, activation='relu'),
- keras.layers.Dense(15, activation='relu'),
- keras.layers.Dense(1, activation='sigmoid'),
- ])
- self.brain.compile(
- keras.optimizers.Adam(learning_rate=0.001),
- loss='binary_crossentropy',
- metrics=['binary_accuracy'],
- )
-
- @staticmethod
- def surround(state: State, x: int, y: int) -> np.ndarray:
- """Return an ndarray representing the 35 blocks surrounding `(x, y)`.
- If the neighbor is empty (or is beyond the border) the array contains
- 0. If it's hidden the state contains -1. If it's revealed, but near a
- bomb then it's value is the number of bombs near it.
- """
- surround = np.zeros((24, ), dtype=float)
- for x_n, y_n in state.neighbors(x, y, radius=2):
- if state.hidden[x_n, y_n]:
- surround[x_n + y_n] = -1
- if state.revealed[x_n, y_n] and state.near[x_n, y_n] > 0:
- surround[x_n + y_n] = state.near[x_n, y_n]
- return surround
-
- def predict(self, state: State, x: int, y: int) -> float:
- """Predict if the tile `(x, y)` is safe."""
- return float(
- self.brain.predict(
- np.array([self.surround(state, x, y)]),
- )[0][0],
+ self.brain_location = self.CACHEDIR.joinpath(
+ f'S{self.size}_D{self.difficulty}',
)
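+        # Reuse a cached model for this (size, difficulty) pair when caching
+        # is enabled; otherwise build and compile a fresh network.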
+ if self.CACHE and self.brain_location.exists():
+ self.brain = keras.models.load_model(self.brain_location)
+ self.trained = True
+ else:
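+            # A 1D convolutional encoder/decoder over the board: downsample,
+            # upsample back to full size, then emit a size x size grid of
+            # per-tile scores.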
+ self.brain = keras.models.Sequential(
+ [
+ keras.layers.InputLayer(
+ input_shape=(
+ self.size,
+ self.size,
+ )),
+ keras.layers.Conv1D(
+ filters=self.size // 2,
+ kernel_size=10,
+ padding='same',
+ strides=2,
+ activation='relu',
+ ), # -> 0.5size x 0.5size
+ keras.layers.Conv1DTranspose(
+ filters=self.size,
+ kernel_size=10,
+ padding='same',
+ strides=2,
+ activation='relu',
+ ), # -> size x size
+ keras.layers.Conv1D(
+ filters=self.size,
+ kernel_size=10,
+ padding='same',
+ strides=1,
+ activation=None,
+ ), # -> size x size
+ ])
+ self.brain.compile(
+ keras.optimizers.Adam(learning_rate=0.001),
+ loss='mse',
+ )
@staticmethod
- def prosess_tile(state: State, x: int, y: int) -> bool:
- """Returns `true` if `(x, y)` is suitable for making a move."""
- return not (state.revealed[x, y] or state.flagged[x, y]) and any(
- state.revealed[x_n, y_n] for x_n, y_n in state.neighbors(x, y))
-
- def move(self, state: State) -> Optional[Tuple[int, int]]:
- """Make a move.
+ def expect(state: State, x: int, y: int) -> float:
+ """What should the network output at `(x, y)` be?
- Returns `None` if the brain has not been compiled yet , and a suggested
- `(x, y)` move otherwise.
+        Returns `# tiles / # safe & hidden` if `(x, y)` is hidden, safe, and
+        next to a revealed tile, and `# tiles / (# revealed + 1)` otherwise.
"""
- if self.trained:
- try:
- pos = max(
- [
- (x, y)
- for x in range(state.size)
- for y in range(state.size)
- if self.prosess_tile(state, x, y)
- ],
- key=lambda pos: self.predict(state, *pos),
- )
- return pos
- except ValueError:
- return None
- return None
+ if state.safe[x, y] and state.hidden[x, y]:
+ for x_n, y_n in state.neighbors(x, y):
+ if state.revealed[x_n, y_n]:
+ return state.size * state.size / (
+ state.safe_n - state.revealed_n)
+ return state.size * state.size / (state.revealed_n + 1)
+
+ def training_data(self, state: State) -> Tuple[np.ndarray, np.ndarray]:
+ """Returns a tuple of `(neural network input, expected output)`."""
+ if self.size != state.size:
+ raise ValueError('Player not fit for state')
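+        # Network input: the neighboring-mine count on revealed tiles, -1 on
+        # every tile that is not revealed.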
+ x_train = np.where(
+ state.revealed == 1,
+ state.near,
+ -np.ones_like(state.near),
+ ).astype(float)
+ y_train = np.zeros_like(state.revealed, dtype=float)
+ for x in range(state.size):
+ for y in range(state.size):
+ y_train[x, y] = self.expect(state, x, y)
+ return x_train, y_train
+
+ def move(self, state: State):
+ """Choose the most advantageous `(x, y)`.
+
+ Returns a tuple of `(x, y)` the player believes to be safest.
+ """
+ output = self.brain.predict(
+ np.stack([self.training_data(state)[0]]),
+ )[0]
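+        # Zero out tiles that are already revealed or flagged before taking
+        # the argmax, so only playable positions are suggested.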
+ output = np.where(
+ (state.revealed == 1) | (state.flagged == 1),
+ 0,
+ output,
+ )
+ pos = np.unravel_index(np.argmax(output, axis=None), output.shape)
+ return pos
def train(self):
"""Recrete and train the AI brain of this player."""
self.brain.summary()
- x_train = []
- y_train = []
- while len(x_train) < 2e4:
- state = State(self.size, 0.2)
- # CLick on a guaranteed empty space
- x_click, y_click = np.transpose(np.nonzero(state.near == 0))[0]
- state.click(x_click, y_click)
- # Play the game until won
- while state.won is None:
- for x in range(self.size):
- for y in range(self.size):
- if self.prosess_tile(state, x, y):
- # The neural network input - 24 surrouding tiles
- sur = self.surround(state, x, y)
- x_train.append(sur)
- # Network should output 1 on safe tiles
- y_train.append(state.safe[x, y])
- state.click(*state.cheat())
-
- x_train = StandardScaler().fit_transform(np.array(x_train))
- y_train = np.array(y_train)
- x_train, y_train = shuffle(x_train, y_train)
- weights = dict(
- zip(
- np.unique(y_train),
- class_weight.compute_class_weight(
- 'balanced',
- classes=np.unique(y_train),
- y=y_train,
- ),
- ),
- )
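+        # Reuse the bundled training data when it exists; otherwise generate
+        # it by playing cheat-assisted games and save the arrays for later runs.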
+ if self.X_TRAIN.exists() and self.Y_TRAIN.exists():
+ x_train = np.load(self.X_TRAIN)
+ y_train = np.load(self.Y_TRAIN)
+ else:
+ x_train = np.ndarray((0, 10, 10))
+ y_train = np.ndarray((0, 10, 10))
+ while len(x_train) < 3e4:
+ state = State(self.size, self.difficulty)
+                # Click on a guaranteed empty space
+ x_click, y_click = np.transpose(np.nonzero(state.near == 0))[0]
+ state.click(x_click, y_click)
+ # Play the game until won
+ while state.won is None:
+ training_data = self.training_data(state)
+ x_train = np.append(
+ x_train,
+ training_data[0][None, :],
+ axis=0,
+ )
+ y_train = np.append(
+ y_train,
+ training_data[1][None, :],
+ axis=0,
+ )
+ state.click(*state.cheat())
+ print(len(x_train))
+            np.save(self.X_TRAIN, x_train)
+            np.save(self.Y_TRAIN, y_train)
history = self.brain.fit(
x_train,
y_train,
- class_weight=weights,
- epochs=20,
+ epochs=50,
batch_size=100,
verbose=1,
- validation_split=0.2,
+ validation_split=0.25,
use_multiprocessing=True,
shuffle=True,
)
- _, axs = plt.subplots(1, 2)
- axs[0].set_title('Accuracy')
- axs[0].plot(history.history['val_binary_accuracy'])
- axs[0].plot(history.history['binary_accuracy'])
-
- axs[1].set_title('Loss')
- axs[1].plot(history.history['val_loss'])
- axs[1].plot(history.history['loss'])
- plt.savefig('fit.pdf')
+
self.trained = True
+
+ if self.CACHE:
+ self.brain.save(self.brain_location)
+
+ if self.PLOT:
+ plt.title('Loss')
+ plt.plot(history.history['val_loss'])
+ plt.plot(history.history['loss'])
+ plt.savefig(f'loss_S{self.size}_D{self.difficulty}.pdf')
R assets/Grid.png => sweep_ai/assets/Grid.png +0 -0
R assets/empty.png => sweep_ai/assets/empty.png +0 -0
R assets/flag.png => sweep_ai/assets/flag.png +0 -0
R assets/grid1.png => sweep_ai/assets/grid1.png +0 -0
R assets/grid2.png => sweep_ai/assets/grid2.png +0 -0
R assets/grid3.png => sweep_ai/assets/grid3.png +0 -0
R assets/grid4.png => sweep_ai/assets/grid4.png +0 -0
R assets/grid5.png => sweep_ai/assets/grid5.png +0 -0
R assets/grid6.png => sweep_ai/assets/grid6.png +0 -0
R assets/grid7.png => sweep_ai/assets/grid7.png +0 -0
R assets/grid8.png => sweep_ai/assets/grid8.png +0 -0
R assets/hint.png => sweep_ai/assets/hint.png +0 -0
R assets/mine.png => sweep_ai/assets/mine.png +0 -0
D sweep_ai/config-feedforward => sweep_ai/config-feedforward +0 -82
@@ 1,82 0,0 @@
-#--- parameters for the XOR-2 experiment ---#
-
-[NEAT]
-fitness_criterion = max
-fitness_threshold = 3.9
-pop_size = 150
-reset_on_extinction = False
-
-[DefaultGenome]
-# node activation options
-activation_default = sigmoid
-activation_mutate_rate = 0.0
-activation_options = sigmoid
-
-# node aggregation options
-aggregation_default = sum
-aggregation_mutate_rate = 0.0
-aggregation_options = sum
-
-# node bias options
-bias_init_mean = 0.0
-bias_init_stdev = 1.0
-bias_max_value = 30.0
-bias_min_value = -30.0
-bias_mutate_power = 0.5
-bias_mutate_rate = 0.7
-bias_replace_rate = 0.1
-
-# genome compatibility options
-compatibility_disjoint_coefficient = 1.0
-compatibility_weight_coefficient = 0.5
-
-# connection add/remove rates
-conn_add_prob = 0.5
-conn_delete_prob = 0.5
-
-# connection enable options
-enabled_default = True
-enabled_mutate_rate = 0.01
-
-feed_forward = True
-initial_connection = full
-
-# node add/remove rates
-node_add_prob = 0.2
-node_delete_prob = 0.2
-
-# network parameters
-num_hidden = 0
-num_inputs = 2
-num_outputs = 1
-
-# node response options
-response_init_mean = 1.0
-response_init_stdev = 0.0
-response_max_value = 30.0
-response_min_value = -30.0
-response_mutate_power = 0.0
-response_mutate_rate = 0.0
-response_replace_rate = 0.0
-
-# connection weight options
-weight_init_mean = 0.0
-weight_init_stdev = 1.0
-weight_max_value = 30
-weight_min_value = -30
-weight_mutate_power = 0.5
-weight_mutate_rate = 0.8
-weight_replace_rate = 0.1
-
-[DefaultSpeciesSet]
-compatibility_threshold = 3.0
-
-[DefaultStagnation]
-species_fitness_func = max
-max_stagnation = 20
-species_elitism = 2
-
-[DefaultReproduction]
-elitism = 2
-survival_threshold = 0.2
-
M sweep_ai/window.py => sweep_ai/window.py +32 -25
@@ 1,6 1,7 @@
"""Window handling module."""
import sys
from threading import Thread
+from pathlib import Path
from typing import Optional, Tuple
import pygame
@@ 11,7 12,6 @@ from .logic import State
# pylint: disable=invalid-name
-
class Game:
"""Game class."""
DIFFICULTY = {
@@ 41,6 41,7 @@ class Game:
self.surface = pygame.display.set_mode(
(self.display_width, self.display_height),
)
+ pygame.display.set_caption('Sweep AI')
self.events = []
self.player = Player(self.size, 0.2)
@@ 48,17 49,33 @@ class Game:
self.hint_thread = Thread(target=lambda _: _)
self.sprites = {}
- self.sprites['flag'] = pygame.image.load('assets/flag.png')
- self.sprites['hidden'] = pygame.image.load('assets/Grid.png')
- self.sprites['mine'] = pygame.image.load('assets/mine.png')
- self.sprites[0] = pygame.image.load('assets/empty.png')
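+        # Load sprites relative to the installed package so the game works
+        # from any working directory.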
+ adir = Path(__file__).parent.joinpath('assets')
+ self.sprites['flag'] = pygame.image.load(adir.joinpath('flag.png'))
+ self.sprites['hidden'] = pygame.image.load(adir.joinpath('Grid.png'))
+ self.sprites['mine'] = pygame.image.load(adir.joinpath('mine.png'))
+ self.sprites[0] = pygame.image.load(adir.joinpath('empty.png'))
for i in range(1, 9):
- self.sprites[i] = pygame.image.load(f'assets/grid{i}.png')
+ self.sprites[i] = pygame.image.load(adir.joinpath(f'grid{i}.png'))
self.timer = pygame.time.Clock()
self.configure_menu()
self.reset()
+ @property
+ def display_width(self):
+ """Displayed window width."""
+ return self._grid_s * self.size + self._border * 2 + self._menu_width
+
+ @property
+ def display_height(self):
+ """Displayed window height."""
+ return self._grid_s * self.size + self._border * 2
+
+ @property
+ def menu_x(self):
+ """The horizontal menu position."""
+ return self._grid_s * self.size + self._border * 2
+
def configure_menu(self):
"""Configures the game menu."""
theme = pygame_menu.Theme(
@@ 152,26 169,14 @@ class Game:
def set_difficulty(self, *args):
"""Set a new difficulty."""
self.difficulty = args[1]
+ self.player = Player(self.size, self.difficulty)
self.reset()
- @property
- def display_width(self):
- """Displayed window width."""
- return self._grid_s * self.size + self._border * 2 + self._menu_width
-
- @property
- def display_height(self):
- """Displayed window height."""
- return self._grid_s * self.size + self._border * 2
-
- @property
- def menu_x(self):
- """The horizontal menu position."""
- return self._grid_s * self.size + self._border * 2
def set_size(self, *args):
"""Set a new board size."""
self.size = args[1]
+ self.player = Player(self.size, self.difficulty)
self.surface = pygame.display.set_mode(
(self.display_width, self.display_height),
)
@@ 180,15 185,15 @@ class Game:
def get_hint(self):
"""Highlight the position suggested by the AI player."""
- # Hint-getting callback
def hint_cb():
- if not self.train_thread.is_alive():
- if not self.player.trained:
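+            # Train the network in a background thread on first use; once it
+            # is trained, ask it for the safest move.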
+ if not self.player.trained:
+ if not self.train_thread.is_alive():
self.train_thread = Thread(target=self.player.train)
self.train_thread.start()
+ else:
+ self.train_thread.join()
else:
- self.train_thread.join()
- self.hint = self.player.move(self.state)
+ self.hint = self.player.move(self.state)
if not self.hint_thread.is_alive():
self.hint_thread = Thread(target=hint_cb)
@@ 260,8 265,10 @@ class Game:
btn = self.menu.get_widget('hint_btn')
if self.train_thread.is_alive():
btn.set_title('[...]')
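+            # Grey out the hint button while the network is still training.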
+ btn.update_font({'color': (100, 100, 100)})
else:
btn.set_title('[Hint]')
+ btn.update_font({'color': (163, 190, 140)})
self.menu.draw(self.surface)
pygame.display.update()
A sweep_ai/x.npy => sweep_ai/x.npy +0 -0
A sweep_ai/y.npy => sweep_ai/y.npy +0 -0