diff --git a/cmsc320_hw4.ipynb b/cmsc320_hw4.ipynb
new file mode 100644
index 00000000..8ab9fbfc
--- /dev/null
+++ b/cmsc320_hw4.ipynb
@@ -0,0 +1,770 @@
+{
+ "nbformat": 4,
+ "nbformat_minor": 0,
+ "metadata": {
+ "colab": {
+ "provenance": [],
+ "authorship_tag": "ABX9TyNlOgZTGUUqhQby0su1ySpw",
+ "include_colab_link": true
+ },
+ "kernelspec": {
+ "name": "python3",
+ "display_name": "Python 3"
+ },
+ "language_info": {
+ "name": "python"
+ }
+ },
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "view-in-github",
+ "colab_type": "text"
+ },
+ "source": [
+        ""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "from google.colab import drive\n",
+ "drive.mount('/content/drive')"
+ ],
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "4n8qy4J_Tb7B",
+ "outputId": "c16ec0f2-dbb4-4d6d-f491-e25edf3dcff8"
+ },
+ "execution_count": 1,
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stdout",
+ "text": [
+ "Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount(\"/content/drive\", force_remount=True).\n"
+ ]
+ }
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {
+ "id": "1fERlpkyTSF4"
+ },
+ "outputs": [],
+ "source": [
+ "import pandas as pd\n",
+ "import pprint\n",
+ "import os\n",
+ "import numpy as np\n",
+ "import matplotlib.pyplot as plt\n",
+ "\n",
+ "from sklearn.neural_network import MLPClassifier\n",
+ "from sklearn.svm import LinearSVC\n",
+ "from sklearn.metrics import confusion_matrix\n",
+ "from sklearn.model_selection import KFold\n",
+ "from sklearn.tree import DecisionTreeClassifier\n",
+ "from sklearn.decomposition import PCA"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "Loading data"
+ ],
+ "metadata": {
+ "id": "Pd-palSEoc54"
+ }
+ },
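+    {
+      "cell_type": "markdown",
+      "source": [
+        "The next cell standardizes each feature column before modeling. The intended transformation, which the code below implements per column, is $z = \\frac{x - \\mu}{\\sigma}$, where $\\mu$ and $\\sigma$ are that column's mean and standard deviation."
+      ],
+      "metadata": {}
+    },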
+ {
+ "cell_type": "code",
+ "source": [
+        "# Load the dataset: 12 numeric feature columns plus the Results label\n",
+        "path = \"./drive/MyDrive/cmsc320/HW4/homework4.csv\"\n",
+        "df = pd.read_csv(path)\n",
+        "\n",
+        "index = [\"%d\" % i for i in range(12)] + [\"Results\"]\n",
+        "data = np.array(df[index])\n",
+        "\n",
+        "# standardize each feature column: z = (x - mean) / std\n",
+        "means = np.average(data[:,:12], axis=0)\n",
+        "stds = np.std(data[:,:12], axis=0)\n",
+        "data[:,:12] = np.divide(np.subtract(data[:,:12], means), stds)\n",
+        "\n",
+        "# shuffle rows so the cross-validation folds are not ordered by the original file\n",
+        "np.random.shuffle(data)"
+ ],
+ "metadata": {
+ "id": "vyu85xCKTtWt"
+ },
+ "execution_count": 4,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+        "This is model 1, trained with 10-fold cross-validation: a linear SVM (LinearSVC with balanced class weights). Across the ten folds it reaches a peak accuracy of 75.1% and a peak precision of 32.9%."
+ ],
+ "metadata": {
+ "id": "xn_67dMSoKkM"
+ }
+ },
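+    {
+      "cell_type": "markdown",
+      "source": [
+        "For reference, the per-fold metrics printed below are computed from the confusion-matrix counts (TP, TN, FP, FN), matching the formulas used in the code cells: $\\text{accuracy} = \\frac{TP+TN}{TP+TN+FP+FN}$, $\\text{precision} = \\frac{TP}{TP+FP}$, $\\text{recall} = \\frac{TP}{TP+FN}$."
+      ],
+      "metadata": {}
+    },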
+ {
+ "cell_type": "code",
+ "source": [
+ "# 10-fold Cross validation\n",
+ "kfold = KFold(n_splits=10)\n",
+ "\n",
+ "for i, (train_index, test_index) in enumerate(kfold.split(data)):\n",
+ " train = data[train_index, :]\n",
+ " test = data[test_index, :]\n",
+ "\n",
+ " svm_classifier = LinearSVC(class_weight='balanced')\n",
+ " svm_classifier.fit(train[:, :12], train[:,12])\n",
+ "\n",
+ " output = svm_classifier.predict(test[:,:12])\n",
+ " #C_ij = i actual, j predicted\n",
+ " c_matrix = confusion_matrix(test[:,12], output)\n",
+ " print(\"confusion matrix:\")\n",
+ " print(c_matrix)\n",
+ " print(\"true positive:\\t%d\\ntrue negative:\\t%d\\nfalse positive:\\t%d\\nfalse negative:\\t%d\"%(c_matrix[1,1], c_matrix[0,0], c_matrix[0,1], c_matrix[1,0]))\n",
+ " precision = c_matrix[1,1] / (c_matrix[1,1] + c_matrix[0,1])\n",
+ " recall = c_matrix[1,1] / (c_matrix[1,1] + c_matrix[1,0])\n",
+ " print(\"\\naccuracy:\\t%f\"%(np.sum(np.equal(output, test[:,12])) / 1000))\n",
+ " print(\"precision:\\t%f\\nrecall:\\t\\t%f\"%(precision, recall))\n",
+ " print(\"\\n\")"
+ ],
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "Ov6GhBNMlQ-E",
+ "outputId": "ccc87c4e-d257-4625-9f91-b697776a5a6b"
+ },
+ "execution_count": null,
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stdout",
+ "text": [
+ "confusion matrix:\n",
+ "[[612 239]\n",
+ " [ 52 97]]\n",
+ "true positive:\t97\n",
+ "true negative:\t612\n",
+ "false positive:\t239\n",
+ "false negative:\t52\n",
+ "\n",
+ "accuracy:\t0.709000\n",
+ "precision:\t0.288690\n",
+ "recall:\t\t0.651007\n",
+ "\n",
+ "\n",
+ "confusion matrix:\n",
+ "[[637 234]\n",
+ " [ 45 84]]\n",
+ "true positive:\t84\n",
+ "true negative:\t637\n",
+ "false positive:\t234\n",
+ "false negative:\t45\n",
+ "\n",
+ "accuracy:\t0.721000\n",
+ "precision:\t0.264151\n",
+ "recall:\t\t0.651163\n",
+ "\n",
+ "\n",
+ "confusion matrix:\n",
+ "[[653 193]\n",
+ " [ 63 91]]\n",
+ "true positive:\t91\n",
+ "true negative:\t653\n",
+ "false positive:\t193\n",
+ "false negative:\t63\n",
+ "\n",
+ "accuracy:\t0.744000\n",
+ "precision:\t0.320423\n",
+ "recall:\t\t0.590909\n",
+ "\n",
+ "\n",
+ "confusion matrix:\n",
+ "[[650 196]\n",
+ " [ 58 96]]\n",
+ "true positive:\t96\n",
+ "true negative:\t650\n",
+ "false positive:\t196\n",
+ "false negative:\t58\n",
+ "\n",
+ "accuracy:\t0.746000\n",
+ "precision:\t0.328767\n",
+ "recall:\t\t0.623377\n",
+ "\n",
+ "\n",
+ "confusion matrix:\n",
+ "[[644 221]\n",
+ " [ 55 80]]\n",
+ "true positive:\t80\n",
+ "true negative:\t644\n",
+ "false positive:\t221\n",
+ "false negative:\t55\n",
+ "\n",
+ "accuracy:\t0.724000\n",
+ "precision:\t0.265781\n",
+ "recall:\t\t0.592593\n",
+ "\n",
+ "\n",
+ "confusion matrix:\n",
+ "[[633 223]\n",
+ " [ 45 99]]\n",
+ "true positive:\t99\n",
+ "true negative:\t633\n",
+ "false positive:\t223\n",
+ "false negative:\t45\n",
+ "\n",
+ "accuracy:\t0.732000\n",
+ "precision:\t0.307453\n",
+ "recall:\t\t0.687500\n",
+ "\n",
+ "\n",
+ "confusion matrix:\n",
+ "[[648 220]\n",
+ " [ 47 85]]\n",
+ "true positive:\t85\n",
+ "true negative:\t648\n",
+ "false positive:\t220\n",
+ "false negative:\t47\n",
+ "\n",
+ "accuracy:\t0.733000\n",
+ "precision:\t0.278689\n",
+ "recall:\t\t0.643939\n",
+ "\n",
+ "\n",
+ "confusion matrix:\n",
+ "[[666 196]\n",
+ " [ 53 85]]\n",
+ "true positive:\t85\n",
+ "true negative:\t666\n",
+ "false positive:\t196\n",
+ "false negative:\t53\n",
+ "\n",
+ "accuracy:\t0.751000\n",
+ "precision:\t0.302491\n",
+ "recall:\t\t0.615942\n",
+ "\n",
+ "\n",
+ "confusion matrix:\n",
+ "[[662 208]\n",
+ " [ 44 86]]\n",
+ "true positive:\t86\n",
+ "true negative:\t662\n",
+ "false positive:\t208\n",
+ "false negative:\t44\n",
+ "\n",
+ "accuracy:\t0.748000\n",
+ "precision:\t0.292517\n",
+ "recall:\t\t0.661538\n",
+ "\n",
+ "\n",
+ "confusion matrix:\n",
+ "[[634 222]\n",
+ " [ 57 87]]\n",
+ "true positive:\t87\n",
+ "true negative:\t634\n",
+ "false positive:\t222\n",
+ "false negative:\t57\n",
+ "\n",
+ "accuracy:\t0.721000\n",
+ "precision:\t0.281553\n",
+ "recall:\t\t0.604167\n",
+ "\n",
+ "\n"
+ ]
+ }
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+        "This is model 2: a neural network (MLPClassifier with default settings). It achieves above 90% accuracy on almost every fold of the 10-fold CV, and precision above 50% on every fold (the lowest fold precision is roughly 80%)."
+ ],
+ "metadata": {
+ "id": "air1sHHBn3zr"
+ }
+ },
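+    {
+      "cell_type": "markdown",
+      "source": [
+        "Some folds below emit a ConvergenceWarning because MLPClassifier stops at its default iteration cap (200). The next cell is an optional sketch, not used for the reported results, showing how that cap could be raised."
+      ],
+      "metadata": {}
+    },
+    {
+      "cell_type": "code",
+      "source": [
+        "# Optional sketch (assumption: raising max_iter is acceptable for this assignment).\n",
+        "# A larger cap lets the stochastic optimizer run longer, which typically removes\n",
+        "# the ConvergenceWarning; the reported results use the defaults.\n",
+        "from sklearn.neural_network import MLPClassifier\n",
+        "\n",
+        "nn_classifier_long = MLPClassifier(max_iter=500)  # default is max_iter=200"
+      ],
+      "metadata": {},
+      "execution_count": null,
+      "outputs": []
+    },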
+ {
+ "cell_type": "code",
+ "source": [
+ "# 10-fold Cross validation\n",
+ "kfold = KFold(n_splits=10)\n",
+ "\n",
+ "for i, (train_index, test_index) in enumerate(kfold.split(data)):\n",
+ " train = data[train_index, :]\n",
+ " test = data[test_index, :]\n",
+ "\n",
+ " nn_classifier = MLPClassifier()\n",
+ " nn_classifier.fit(train[:, :12], train[:,12])\n",
+ "\n",
+ " output = nn_classifier.predict(test[:,:12])\n",
+ " #C_ij = i actual, j predicted\n",
+ " c_matrix = confusion_matrix(test[:,12], output)\n",
+ " print(\"confusion matrix:\")\n",
+ " print(c_matrix)\n",
+ " print(\"true positive:\\t%d\\ntrue negative:\\t%d\\nfalse positive:\\t%d\\nfalse negative:\\t%d\"%(c_matrix[1,1], c_matrix[0,0], c_matrix[0,1], c_matrix[1,0]))\n",
+ " precision = c_matrix[1,1] / (c_matrix[1,1] + c_matrix[0,1])\n",
+ " recall = c_matrix[1,1] / (c_matrix[1,1] + c_matrix[1,0])\n",
+ " print(\"\\naccuracy:\\t%f\"%(np.sum(np.equal(output, test[:,12])) / 1000))\n",
+ " print(\"precision:\\t%f\\nrecall:\\t\\t%f\"%(precision, recall))\n",
+ " print(\"\\n\")"
+ ],
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "oTjOYzBub9qx",
+ "outputId": "e75ae2af-9651-4f98-dc3e-fa7720e61ad8"
+ },
+ "execution_count": null,
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stdout",
+ "text": [
+ "confusion matrix:\n",
+ "[[846 5]\n",
+ " [ 83 66]]\n",
+ "true positive:\t66\n",
+ "true negative:\t846\n",
+ "false positive:\t5\n",
+ "false negative:\t83\n",
+ "\n",
+ "accuracy:\t0.912000\n",
+ "precision:\t0.929577\n",
+ "recall:\t\t0.442953\n",
+ "\n",
+ "\n"
+ ]
+ },
+ {
+ "output_type": "stream",
+ "name": "stderr",
+ "text": [
+ "/usr/local/lib/python3.10/dist-packages/sklearn/neural_network/_multilayer_perceptron.py:690: ConvergenceWarning: Stochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\n",
+ " warnings.warn(\n"
+ ]
+ },
+ {
+ "output_type": "stream",
+ "name": "stdout",
+ "text": [
+ "confusion matrix:\n",
+ "[[861 10]\n",
+ " [ 60 69]]\n",
+ "true positive:\t69\n",
+ "true negative:\t861\n",
+ "false positive:\t10\n",
+ "false negative:\t60\n",
+ "\n",
+ "accuracy:\t0.930000\n",
+ "precision:\t0.873418\n",
+ "recall:\t\t0.534884\n",
+ "\n",
+ "\n",
+ "confusion matrix:\n",
+ "[[830 16]\n",
+ " [ 91 63]]\n",
+ "true positive:\t63\n",
+ "true negative:\t830\n",
+ "false positive:\t16\n",
+ "false negative:\t91\n",
+ "\n",
+ "accuracy:\t0.893000\n",
+ "precision:\t0.797468\n",
+ "recall:\t\t0.409091\n",
+ "\n",
+ "\n"
+ ]
+ },
+ {
+ "output_type": "stream",
+ "name": "stderr",
+ "text": [
+ "/usr/local/lib/python3.10/dist-packages/sklearn/neural_network/_multilayer_perceptron.py:690: ConvergenceWarning: Stochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\n",
+ " warnings.warn(\n"
+ ]
+ },
+ {
+ "output_type": "stream",
+ "name": "stdout",
+ "text": [
+ "confusion matrix:\n",
+ "[[837 9]\n",
+ " [ 86 68]]\n",
+ "true positive:\t68\n",
+ "true negative:\t837\n",
+ "false positive:\t9\n",
+ "false negative:\t86\n",
+ "\n",
+ "accuracy:\t0.905000\n",
+ "precision:\t0.883117\n",
+ "recall:\t\t0.441558\n",
+ "\n",
+ "\n",
+ "confusion matrix:\n",
+ "[[855 10]\n",
+ " [ 83 52]]\n",
+ "true positive:\t52\n",
+ "true negative:\t855\n",
+ "false positive:\t10\n",
+ "false negative:\t83\n",
+ "\n",
+ "accuracy:\t0.907000\n",
+ "precision:\t0.838710\n",
+ "recall:\t\t0.385185\n",
+ "\n",
+ "\n",
+ "confusion matrix:\n",
+ "[[849 7]\n",
+ " [ 73 71]]\n",
+ "true positive:\t71\n",
+ "true negative:\t849\n",
+ "false positive:\t7\n",
+ "false negative:\t73\n",
+ "\n",
+ "accuracy:\t0.920000\n",
+ "precision:\t0.910256\n",
+ "recall:\t\t0.493056\n",
+ "\n",
+ "\n",
+ "confusion matrix:\n",
+ "[[861 7]\n",
+ " [ 68 64]]\n",
+ "true positive:\t64\n",
+ "true negative:\t861\n",
+ "false positive:\t7\n",
+ "false negative:\t68\n",
+ "\n",
+ "accuracy:\t0.925000\n",
+ "precision:\t0.901408\n",
+ "recall:\t\t0.484848\n",
+ "\n",
+ "\n",
+ "confusion matrix:\n",
+ "[[857 5]\n",
+ " [ 86 52]]\n",
+ "true positive:\t52\n",
+ "true negative:\t857\n",
+ "false positive:\t5\n",
+ "false negative:\t86\n",
+ "\n",
+ "accuracy:\t0.909000\n",
+ "precision:\t0.912281\n",
+ "recall:\t\t0.376812\n",
+ "\n",
+ "\n",
+ "confusion matrix:\n",
+ "[[862 8]\n",
+ " [ 72 58]]\n",
+ "true positive:\t58\n",
+ "true negative:\t862\n",
+ "false positive:\t8\n",
+ "false negative:\t72\n",
+ "\n",
+ "accuracy:\t0.920000\n",
+ "precision:\t0.878788\n",
+ "recall:\t\t0.446154\n",
+ "\n",
+ "\n",
+ "confusion matrix:\n",
+ "[[847 9]\n",
+ " [ 76 68]]\n",
+ "true positive:\t68\n",
+ "true negative:\t847\n",
+ "false positive:\t9\n",
+ "false negative:\t76\n",
+ "\n",
+ "accuracy:\t0.915000\n",
+ "precision:\t0.883117\n",
+ "recall:\t\t0.472222\n",
+ "\n",
+ "\n"
+ ]
+ }
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+        "This is model 3: a decision tree. Over the 10-fold CV, the maximum accuracy and maximum precision achieved are 86.7% and 49.6%, respectively."
+ ],
+ "metadata": {
+ "id": "DrJ21i6rnmS8"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# 10-fold Cross validation\n",
+ "kfold = KFold(n_splits=10)\n",
+ "\n",
+ "for i, (train_index, test_index) in enumerate(kfold.split(data)):\n",
+ " train = data[train_index, :]\n",
+ " test = data[test_index, :]\n",
+ "\n",
+ " dtree_classifier = DecisionTreeClassifier()\n",
+ " dtree_classifier.fit(train[:, :12], train[:,12])\n",
+ "\n",
+ " output = dtree_classifier.predict(test[:,:12])\n",
+ " print(\"iter %d:\\n------------------\"%(i+1))\n",
+ "\n",
+ " #C_ij = i actual, j predicted\n",
+ " c_matrix = confusion_matrix(test[:,12], output)\n",
+ " print(\"confusion matrix:\")\n",
+ " print(c_matrix)\n",
+ " print(\"true positive:\\t%d\\ntrue negative:\\t%d\\nfalse positive:\\t%d\\nfalse negative:\\t%d\"%(c_matrix[1,1], c_matrix[0,0], c_matrix[0,1], c_matrix[1,0]))\n",
+ " precision = c_matrix[1,1] / (c_matrix[1,1] + c_matrix[0,1])\n",
+ " recall = c_matrix[1,1] / (c_matrix[1,1] + c_matrix[1,0])\n",
+ " print(\"\\naccuracy:\\t%f\"%(np.sum(np.equal(output, test[:,12])) / 1000))\n",
+ " print(\"precision:\\t%f\\nrecall:\\t\\t%f\"%(precision, recall))\n",
+ " print(\"\\n\")"
+ ],
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "_NHWq08yBroB",
+ "outputId": "6dab9505-0938-4676-e910-1d24747b1759"
+ },
+ "execution_count": null,
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stdout",
+ "text": [
+ "iter 1:\n",
+ "------------------\n",
+ "confusion matrix:\n",
+ "[[779 72]\n",
+ " [ 80 69]]\n",
+ "true positive:\t69\n",
+ "true negative:\t779\n",
+ "false positive:\t72\n",
+ "false negative:\t80\n",
+ "\n",
+ "accuracy:\t0.848000\n",
+ "precision:\t0.489362\n",
+ "recall:\t\t0.463087\n",
+ "\n",
+ "\n",
+ "iter 2:\n",
+ "------------------\n",
+ "confusion matrix:\n",
+ "[[767 104]\n",
+ " [ 59 70]]\n",
+ "true positive:\t70\n",
+ "true negative:\t767\n",
+ "false positive:\t104\n",
+ "false negative:\t59\n",
+ "\n",
+ "accuracy:\t0.837000\n",
+ "precision:\t0.402299\n",
+ "recall:\t\t0.542636\n",
+ "\n",
+ "\n",
+ "iter 3:\n",
+ "------------------\n",
+ "confusion matrix:\n",
+ "[[769 77]\n",
+ " [ 85 69]]\n",
+ "true positive:\t69\n",
+ "true negative:\t769\n",
+ "false positive:\t77\n",
+ "false negative:\t85\n",
+ "\n",
+ "accuracy:\t0.838000\n",
+ "precision:\t0.472603\n",
+ "recall:\t\t0.448052\n",
+ "\n",
+ "\n",
+ "iter 4:\n",
+ "------------------\n",
+ "confusion matrix:\n",
+ "[[762 84]\n",
+ " [ 90 64]]\n",
+ "true positive:\t64\n",
+ "true negative:\t762\n",
+ "false positive:\t84\n",
+ "false negative:\t90\n",
+ "\n",
+ "accuracy:\t0.826000\n",
+ "precision:\t0.432432\n",
+ "recall:\t\t0.415584\n",
+ "\n",
+ "\n",
+ "iter 5:\n",
+ "------------------\n",
+ "confusion matrix:\n",
+ "[[776 89]\n",
+ " [ 81 54]]\n",
+ "true positive:\t54\n",
+ "true negative:\t776\n",
+ "false positive:\t89\n",
+ "false negative:\t81\n",
+ "\n",
+ "accuracy:\t0.830000\n",
+ "precision:\t0.377622\n",
+ "recall:\t\t0.400000\n",
+ "\n",
+ "\n",
+ "iter 6:\n",
+ "------------------\n",
+ "confusion matrix:\n",
+ "[[768 88]\n",
+ " [ 76 68]]\n",
+ "true positive:\t68\n",
+ "true negative:\t768\n",
+ "false positive:\t88\n",
+ "false negative:\t76\n",
+ "\n",
+ "accuracy:\t0.836000\n",
+ "precision:\t0.435897\n",
+ "recall:\t\t0.472222\n",
+ "\n",
+ "\n",
+ "iter 7:\n",
+ "------------------\n",
+ "confusion matrix:\n",
+ "[[804 64]\n",
+ " [ 69 63]]\n",
+ "true positive:\t63\n",
+ "true negative:\t804\n",
+ "false positive:\t64\n",
+ "false negative:\t69\n",
+ "\n",
+ "accuracy:\t0.867000\n",
+ "precision:\t0.496063\n",
+ "recall:\t\t0.477273\n",
+ "\n",
+ "\n",
+ "iter 8:\n",
+ "------------------\n",
+ "confusion matrix:\n",
+ "[[780 82]\n",
+ " [ 86 52]]\n",
+ "true positive:\t52\n",
+ "true negative:\t780\n",
+ "false positive:\t82\n",
+ "false negative:\t86\n",
+ "\n",
+ "accuracy:\t0.832000\n",
+ "precision:\t0.388060\n",
+ "recall:\t\t0.376812\n",
+ "\n",
+ "\n",
+ "iter 9:\n",
+ "------------------\n",
+ "confusion matrix:\n",
+ "[[787 83]\n",
+ " [ 80 50]]\n",
+ "true positive:\t50\n",
+ "true negative:\t787\n",
+ "false positive:\t83\n",
+ "false negative:\t80\n",
+ "\n",
+ "accuracy:\t0.837000\n",
+ "precision:\t0.375940\n",
+ "recall:\t\t0.384615\n",
+ "\n",
+ "\n",
+ "iter 10:\n",
+ "------------------\n",
+ "confusion matrix:\n",
+ "[[767 89]\n",
+ " [ 64 80]]\n",
+ "true positive:\t80\n",
+ "true negative:\t767\n",
+ "false positive:\t89\n",
+ "false negative:\t64\n",
+ "\n",
+ "accuracy:\t0.847000\n",
+ "precision:\t0.473373\n",
+ "recall:\t\t0.555556\n",
+ "\n",
+ "\n"
+ ]
+ }
+ ]
+ },
+ {
+ "cell_type": "code",
+ "source": [
+        "# Evaluate the neural network as the 12 standardized features are reduced\n",
+        "# to i principal components, for i = 12 down to 2 (0 to 10 dimensions\n",
+        "# reduced), using a fixed 9000/1000 train/test split of the shuffled data.\n",
+        "plot_data = np.zeros((11,2))\n",
+        "\n",
+        "for i in range(12,1,-1):\n",
+        "  pca = PCA(n_components = i)\n",
+        "  data_transformed = np.ascontiguousarray(pca.fit_transform(data[:,:12])).astype(float)\n",
+        "\n",
+        "  train = data_transformed[:9000,:]\n",
+        "  test = data_transformed[9000:,:]\n",
+        "\n",
+        "  nn_classifier = MLPClassifier()\n",
+        "  nn_classifier.fit(train, data[:9000,-1])\n",
+        "  output = nn_classifier.predict(test)\n",
+        "\n",
+        "  accuracy = np.sum(np.equal(output, data[9000:,-1])) / 1000\n",
+        "  plot_data[12 - i] = [12 - i, accuracy]\n",
+        "\n",
+        "plt.plot(plot_data[:,0], plot_data[:,1])\n",
+        "plt.title(\"Neural network accuracy as dimensions are reduced via PCA\")\n",
+        "plt.xlabel(\"dimensions reduced\")\n",
+        "plt.ylabel(\"accuracy\")\n",
+        "plt.show()"
+ ],
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 472
+ },
+ "id": "W1TtAYJqlk2C",
+ "outputId": "6a8f0005-862b-4ccc-c924-faa5f2ca4fe7"
+ },
+ "execution_count": 15,
+ "outputs": [
+ {
+ "output_type": "display_data",
+ "data": {
+ "text/plain": [
+ ""
+ ],
+            "image/png": "<base64 PNG data omitted: line plot of neural network accuracy versus number of dimensions reduced via PCA>"
+ },
+ "metadata": {}
+ }
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+        "As the plot shows, the model's accuracy holds up until PCA has removed about 9 dimensions; that is, accuracy drops off once the data is projected down to 3 or fewer dimensions."
+ ],
+ "metadata": {
+ "id": "eDLdI9APnJzQ"
+ }
+ },
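+    {
+      "cell_type": "markdown",
+      "source": [
+        "As a follow-up sketch (not part of the runs above), the cumulative explained-variance ratio of a PCA fit on all 12 standardized features shows how much information each retained component carries, which helps explain why accuracy only degrades once very few components remain."
+      ],
+      "metadata": {}
+    },
+    {
+      "cell_type": "code",
+      "source": [
+        "# Sketch: cumulative variance explained by the principal components of the\n",
+        "# 12 standardized feature columns. Assumes `data` from the loading cell above;\n",
+        "# np and PCA are already imported in the first code cell.\n",
+        "pca_full = PCA(n_components=12)\n",
+        "pca_full.fit(data[:, :12])\n",
+        "print(np.cumsum(pca_full.explained_variance_ratio_))"
+      ],
+      "metadata": {},
+      "execution_count": null,
+      "outputs": []
+    },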
+ {
+ "cell_type": "code",
+ "source": [],
+ "metadata": {
+ "id": "0GqOmTCkonxi"
+ },
+ "execution_count": null,
+ "outputs": []
+ }
+ ]
+}
\ No newline at end of file
diff --git a/setup.py b/setup.py
index 31e09f23..22a94e88 100644
--- a/setup.py
+++ b/setup.py
@@ -2,7 +2,7 @@
extras = dict()
-extras['test'] = ['cmake', 'ninja', 'nle>=0.9.0', 'matplotlib>=3.7.1', 'scipy==1.10.0', 'tensorboard>=2.13.0', 'shimmy']
+extras['test'] = ['cmake', 'matplotlib>=3.7.1', 'scipy==1.10.0', 'tensorboard>=2.13.0', 'shimmy']
extras['docs'] = ['sphinx-tabs', 'sphinxcontrib-spelling', 'furo']
extras['all'] = extras['test'] + extras['docs']
diff --git a/syllabus/examples/custom_envs/__init__.py b/syllabus/examples/custom_envs/__init__.py
new file mode 100644
index 00000000..d80c350e
--- /dev/null
+++ b/syllabus/examples/custom_envs/__init__.py
@@ -0,0 +1,7 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+from .obstructedmaze_gamut import ObstructedMazeGamut
diff --git a/syllabus/examples/custom_envs/obstructedmaze_fixedgrid.py b/syllabus/examples/custom_envs/obstructedmaze_fixedgrid.py
new file mode 100644
index 00000000..89f9e53e
--- /dev/null
+++ b/syllabus/examples/custom_envs/obstructedmaze_fixedgrid.py
@@ -0,0 +1,232 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+from gym_minigrid.minigrid import *
+from gym_minigrid.roomgrid import RoomGrid
+from gym_minigrid.register import register
+
+class ObstructedMazeEnvFixedGrid(RoomGrid):
+ """
+ A blue ball is hidden in the maze. Doors may be locked,
+ doors may be obstructed by a ball and keys may be hidden in boxes.
+ """
+
+ def __init__(self,
+ num_rows,
+ num_cols,
+ num_rooms_visited,
+ seed=None
+ ):
+ room_size = 7
+ max_steps = 4*num_rooms_visited*room_size**2
+
+ super().__init__(
+ room_size=room_size,
+ num_rows=num_rows,
+ num_cols=num_cols,
+ frame_rows=3,
+ frame_cols=3,
+ max_steps=max_steps,
+ seed=seed
+ )
+
+ def _gen_grid(self, width, height):
+ super()._gen_grid(width, height)
+
+ # Define all possible colors for doors
+ self.door_colors = self._rand_subset(COLOR_NAMES, len(COLOR_NAMES))
+ # Define the color of the ball to pick up
+ self.ball_to_find_color = COLOR_NAMES[0]
+ # Define the color of the balls that obstruct doors
+ self.blocking_ball_color = COLOR_NAMES[1]
+ # Define the color of boxes in which keys are hidden
+ self.box_color = COLOR_NAMES[2]
+
+ self.mission = "pick up the %s ball" % self.ball_to_find_color
+
+ def step(self, action):
+ obs, reward, done, info = super().step(action)
+
+ if action == self.actions.pickup:
+ if self.carrying and self.carrying == self.obj:
+ reward = self._reward()
+ done = True
+
+ return obs, reward, done, info
+
+ def add_door(self, i, j, door_idx=0, color=None, locked=False, key_in_box=False, blocked=False):
+ """
+ Add a door. If the door must be locked, it also adds the key.
+ If the key must be hidden, it is put in a box. If the door must
+ be obstructed, it adds a ball in front of the door.
+ """
+
+ door, door_pos = super().add_door(i, j, door_idx, color, locked=locked)
+
+ if blocked:
+ vec = DIR_TO_VEC[door_idx]
+ blocking_ball = Ball(self.blocking_ball_color) if blocked else None
+ self.grid.set(door_pos[0]-vec[0], door_pos[1]-vec[1], blocking_ball)
+
+ if locked:
+ obj = Key(door.color)
+ if key_in_box:
+ box = Box(self.box_color) if key_in_box else None
+ box.contains = obj
+ obj = box
+ self.place_in_room(i, j, obj)
+
+ return door, door_pos
+
+class ObstructedMaze_1Dlhb(ObstructedMazeEnvFixedGrid):
+ """
+ A blue ball is hidden in a 2x1 maze. A locked door separates
+ rooms. Doors are obstructed by a ball and keys are hidden in boxes.
+ """
+
+ def __init__(self, key_in_box=True, blocked=True, seed=None):
+ self.key_in_box = key_in_box
+ self.blocked = blocked
+
+ super().__init__(
+ num_rows=1,
+ num_cols=2,
+ num_rooms_visited=2,
+ seed=seed
+ )
+
+ def _gen_grid(self, width, height):
+ super()._gen_grid(width, height)
+
+ self.add_door(0, 0, door_idx=0, color=self.door_colors[0],
+ locked=True,
+ key_in_box=self.key_in_box,
+ blocked=self.blocked)
+
+ self.obj, _ = self.add_object(1, 0, "ball", color=self.ball_to_find_color)
+ self.place_agent(0, 0)
+
+class ObstructedMaze_1Dl(ObstructedMaze_1Dlhb):
+ def __init__(self, seed=None):
+ super().__init__(False, False, seed)
+
+class ObstructedMaze_1Dlh(ObstructedMaze_1Dlhb):
+ def __init__(self, seed=None):
+ super().__init__(True, False, seed)
+
+class ObstructedMaze_Full(ObstructedMazeEnvFixedGrid):
+ """
+ A blue ball is hidden in one of the 4 corners of a 3x3 maze. Doors
+ are locked, doors are obstructed by a ball and keys are hidden in
+ boxes.
+ """
+
+ def __init__(self, agent_room=(1, 1), key_in_box=True, blocked=True,
+ num_quarters=4, num_rooms_visited=25, seed=None):
+ self.agent_room = agent_room
+ self.key_in_box = key_in_box
+ self.blocked = blocked
+ self.num_quarters = num_quarters
+
+ super().__init__(
+ num_rows=3,
+ num_cols=3,
+ num_rooms_visited=num_rooms_visited,
+ seed=seed
+ )
+
+ def _gen_grid(self, width, height):
+ super()._gen_grid(width, height)
+
+ middle_room = (1, 1)
+ # Define positions of "side rooms" i.e. rooms that are neither
+ # corners nor the center.
+ side_rooms = [(2, 1), (1, 2), (0, 1), (1, 0)][:self.num_quarters]
+ for i in range(len(side_rooms)):
+ side_room = side_rooms[i]
+
+ # Add a door between the center room and the side room
+ self.add_door(*middle_room, door_idx=i, color=self.door_colors[i], locked=False)
+
+ for k in [-1, 1]:
+ # Add a door to each side of the side room
+ self.add_door(*side_room, locked=True,
+ door_idx=(i+k)%4,
+ color=self.door_colors[(i+k)%len(self.door_colors)],
+ key_in_box=self.key_in_box,
+ blocked=self.blocked)
+
+ corners = [(2, 0), (2, 2), (0, 2), (0, 0)][:self.num_quarters]
+ ball_room = self._rand_elem(corners)
+
+ self.obj, _ = self.add_object(*ball_room, "ball", color=self.ball_to_find_color)
+ self.place_agent(*self.agent_room)
+
+class ObstructedMaze_2Dl(ObstructedMaze_Full):
+ def __init__(self, seed=None):
+ super().__init__((2, 1), False, False, 1, 4, seed)
+
+class ObstructedMaze_2Dlh(ObstructedMaze_Full):
+ def __init__(self, seed=None):
+ super().__init__((2, 1), True, False, 1, 4, seed)
+
+
+class ObstructedMaze_2Dlhb(ObstructedMaze_Full):
+ def __init__(self, seed=None):
+ super().__init__((2, 1), True, True, 1, 4, seed)
+
+class ObstructedMaze_1Q(ObstructedMaze_Full):
+ def __init__(self, seed=None):
+ super().__init__((1, 1), True, True, 1, 5, seed)
+
+class ObstructedMaze_2Q(ObstructedMaze_Full):
+ def __init__(self, seed=None):
+ super().__init__((1, 1), True, True, 2, 11, seed)
+
+register(
+ id="MiniGrid-ObstructedMaze-1Dl-fixed_grid-v0",
+ entry_point=f"{__name__}:ObstructedMaze_1Dl"
+)
+
+register(
+ id="MiniGrid-ObstructedMaze-1Dlh-fixed_grid-v0",
+ entry_point=f"{__name__}:ObstructedMaze_1Dlh"
+)
+
+register(
+ id="MiniGrid-ObstructedMaze-1Dlhb-fixed_grid-v0",
+ entry_point=f"{__name__}:ObstructedMaze_1Dlhb"
+)
+
+register(
+ id="MiniGrid-ObstructedMaze-2Dl-fixed_grid-v0",
+ entry_point=f"{__name__}:ObstructedMaze_2Dl"
+)
+
+register(
+ id="MiniGrid-ObstructedMaze-2Dlh-fixed_grid-v0",
+ entry_point=f"{__name__}:ObstructedMaze_2Dlh"
+)
+
+register(
+ id="MiniGrid-ObstructedMaze-2Dlhb-fixed_grid-v0",
+ entry_point=f"{__name__}:ObstructedMaze_2Dlhb"
+)
+
+register(
+ id="MiniGrid-ObstructedMaze-1Q-fixed_grid-v0",
+ entry_point=f"{__name__}:ObstructedMaze_1Q"
+)
+
+register(
+ id="MiniGrid-ObstructedMaze-2Q-fixed_grid-v0",
+ entry_point=f"{__name__}:ObstructedMaze_2Q"
+)
+
+register(
+ id="MiniGrid-ObstructedMaze-Full-fixed_grid-v0",
+ entry_point=f"{__name__}:ObstructedMaze_Full"
+)
\ No newline at end of file
diff --git a/syllabus/examples/custom_envs/obstructedmaze_gamut.py b/syllabus/examples/custom_envs/obstructedmaze_gamut.py
new file mode 100644
index 00000000..56efa0d2
--- /dev/null
+++ b/syllabus/examples/custom_envs/obstructedmaze_gamut.py
@@ -0,0 +1,185 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+import gym
+from gym_minigrid.register import register
+
+from .obstructedmaze_fixedgrid import ObstructedMazeEnvFixedGrid  # noqa: F401  (import registers the fixed-grid env ids)
+
+
+ALL_SUBENVS = [
+ 'MiniGrid-ObstructedMaze-1Dl-fixed_grid-v0',
+ 'MiniGrid-ObstructedMaze-1Dlh-fixed_grid-v0',
+ 'MiniGrid-ObstructedMaze-1Dlhb-fixed_grid-v0',
+ 'MiniGrid-ObstructedMaze-2Dl-fixed_grid-v0',
+ 'MiniGrid-ObstructedMaze-2Dlh-fixed_grid-v0',
+ 'MiniGrid-ObstructedMaze-2Dlhb-fixed_grid-v0',
+ 'MiniGrid-ObstructedMaze-1Q-fixed_grid-v0',
+ 'MiniGrid-ObstructedMaze-2Q-fixed_grid-v0',
+ 'MiniGrid-ObstructedMaze-Full-fixed_grid-v0'
+]
+
+TILE_PIXELS = 32
+
+
+class ObstructedMazeGamut(gym.Env):
+ def __init__(self, distribution='easy', max_difficulty=None, seed=1337):
+
+ self.distribution = distribution
+ if distribution == 'easy':
+ self.max_difficulty = 3
+ elif distribution == 'medium':
+ self.max_difficulty = 6
+ elif distribution == 'hard':
+ self.max_difficulty = 9
+ else:
+ raise ValueError(f'Unsupported distribution {distribution}.')
+
+ if max_difficulty is not None:
+ self.max_difficulty = max_difficulty
+
+ self.subenvs = []
+ for env_name in ALL_SUBENVS[:self.max_difficulty]:
+ self.subenvs.append(gym.make(env_name))
+
+ self.num_subenvs = len(self.subenvs)
+
+ self.seed(seed)
+ self.reset()
+
+ @property
+ def actions(self):
+ return self.env.actions
+
+ @property
+ def agent_view_size(self):
+ return self.env.agent_view_size
+
+ @property
+ def reward_range(self):
+ return self.env.reward_range
+
+ @property
+ def window(self):
+ return self.env.window
+
+ @property
+ def width(self):
+ return self.env.width
+
+ @property
+ def height(self):
+ return self.env.height
+
+ @property
+ def grid(self):
+ return self.env.grid
+
+ @property
+ def max_steps(self):
+ return self.env.max_steps
+
+ @property
+ def see_through_walls(self):
+ return self.env.see_through_walls
+
+ @property
+ def agent_pos(self):
+ return self.env.agent_pos
+
+ @property
+ def agent_dir(self):
+ return self.env.agent_dir
+
+ @property
+ def step_count(self):
+ return self.env.step_count
+
+ @property
+ def carrying(self):
+ return self.env.carrying
+
+ @property
+ def observation_space(self):
+ return self.env.observation_space
+
+ @property
+ def action_space(self):
+ return self.env.action_space
+
+ @property
+ def steps_remaining(self):
+ return self.env.steps_remaining
+
+ def __str__(self):
+ return self.env.__str__()
+
+ def reset(self):
+ return self.env.reset()
+
+ def seed(self, seed=1337):
+ env_index = seed % self.num_subenvs
+ self.env = self.subenvs[env_index]
+ self.env.seed(seed)
+
+ def hash(self, size=16):
+ return self.env.hash(size)
+
+ def relative_coords(self, x, y):
+ return self.env.relative_coords(x, y)
+
+ def in_view(self, x, y):
+ return self.env.in_view(x, y)
+
+ def agent_sees(self, x, y):
+ return self.env.agent_sees(x, y)
+
+ def step(self, action):
+ return self.env.step(action)
+
+ def gen_obs_grid(self):
+ return self.env.gen_obs_grid()
+
+ def gen_obs(self):
+ return self.env.gen_obs()
+
+ def get_obs_render(self, obs, tile_size=TILE_PIXELS//2):
+ return self.env.get_obs_render(obs, tile_size)
+
+ def render(self, mode='human', close=False, highlight=True, tile_size=TILE_PIXELS):
+ return self.env.render(mode, close, highlight, tile_size)
+
+ def close(self):
+ return self.env.close()
+
+
+class ObstructedMazeGamut_Easy(ObstructedMazeGamut):
+ def __init__(self, seed=1337):
+ super().__init__(distribution='easy', seed=seed)
+
+class ObstructedMazeGamut_Medium(ObstructedMazeGamut):
+ def __init__(self, seed=1337):
+ super().__init__(distribution='medium', seed=seed)
+
+class ObstructedMazeGamut_Hard(ObstructedMazeGamut):
+ def __init__(self, seed=1337):
+ super().__init__(distribution='hard', seed=seed)
+
+
+register(
+ id="MiniGrid-ObstructedMazeGamut-Easy-v0",
+ entry_point=f"{__name__}:ObstructedMazeGamut_Easy"
+)
+
+register(
+ id="MiniGrid-ObstructedMazeGamut-Medium-v0",
+ entry_point=f"{__name__}:ObstructedMazeGamut_Medium"
+)
+
+register(
+ id="MiniGrid-ObstructedMazeGamut-Hard-v0",
+ entry_point=f"{__name__}:ObstructedMazeGamut_Hard"
+)
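+
+
+if __name__ == "__main__":
+    # Illustrative sketch (an editor-added example, not part of the original module). Run with
+    # `python -m syllabus.examples.custom_envs.obstructedmaze_gamut`; it only prints which
+    # fixed-grid sub-environments each difficulty distribution samples from. A constructed
+    # instance re-seeded with `env.seed(s)` switches to ALL_SUBENVS[s % num_subenvs].
+    for name, cutoff in [("easy", 3), ("medium", 6), ("hard", 9)]:
+        print(f"{name:>6s} draws from {cutoff} sub-environments:")
+        for env_id in ALL_SUBENVS[:cutoff]:
+            print(f"    {env_id}")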
diff --git a/syllabus/examples/models/minigrid_model_verma.py b/syllabus/examples/models/minigrid_model_verma.py
new file mode 100644
index 00000000..bb4efbd4
--- /dev/null
+++ b/syllabus/examples/models/minigrid_model_verma.py
@@ -0,0 +1,185 @@
+import numpy as np
+import torch
+import torch.nn as nn
+
+def init(module, weight_init, bias_init, gain=1):
+ weight_init(module.weight.data, gain=gain)
+ bias_init(module.bias.data)
+ return module
+
+init_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.constant_(x, 0))
+
+init_relu_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.constant_(x, 0), nn.init.calculate_gain('relu'))
+
+init_tanh_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.constant_(x, 0), np.sqrt(2))
+
+class FixedCategorical(torch.distributions.Categorical):
+ """
+ Categorical distribution object
+ """
+ def sample(self):
+ return super().sample().unsqueeze(-1)
+
+ def log_probs(self, actions):
+ return (
+ super()
+ .log_prob(actions.squeeze(-1))
+ .view(actions.size(0), -1)
+ .sum(-1)
+ .unsqueeze(-1)
+ )
+
+ def mode(self):
+ return self.probs.argmax(dim=-1, keepdim=True)
+
+class Categorical(nn.Module):
+ """
+ Categorical distribution (NN module)
+ """
+ def __init__(self, num_inputs, num_outputs):
+ super(Categorical, self).__init__()
+
+ init_ = lambda m: init(
+ m,
+ nn.init.orthogonal_,
+ lambda x: nn.init.constant_(x, 0),
+ gain=0.01)
+
+ self.linear = init_(nn.Linear(num_inputs, num_outputs))
+
+ def forward(self, x):
+ x = self.linear(x)
+ return FixedCategorical(logits=x)
+
+def apply_init_(modules):
+ """
+ Initialize NN modules
+ """
+ for m in modules:
+ if isinstance(m, nn.Conv2d):
+ nn.init.xavier_uniform_(m.weight)
+ if m.bias is not None:
+ nn.init.constant_(m.bias, 0)
+ elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
+ nn.init.constant_(m.weight, 1)
+ if m.bias is not None:
+ nn.init.constant_(m.bias, 0)
+
+class MinigridPolicyVerma(nn.Module):
+ """
+ Actor-Critic module
+ """
+ def __init__(self, obs_shape, num_actions, arch='small', base_kwargs=None):
+ super(MinigridPolicyVerma, self).__init__()
+
+ if base_kwargs is None:
+ base_kwargs = {}
+
+ final_channels = 32 if arch == 'small' else 64
+
+ self.image_conv = nn.Sequential(
+ nn.Conv2d(3, 16, (2, 2)),
+ nn.ReLU(),
+ nn.MaxPool2d((2, 2)),
+ nn.Conv2d(16, 32, (2, 2)),
+ nn.ReLU(),
+ nn.Conv2d(32, final_channels, (2, 2)),
+ nn.ReLU()
+ )
+ n = obs_shape[-2]
+ m = obs_shape[-1]
+ self.image_embedding_size = ((n-1)//2-2)*((m-1)//2-2)*final_channels
+ self.embedding_size = self.image_embedding_size
+
+ # Define actor's model
+ self.actor_base = nn.Sequential(
+ init_tanh_(nn.Linear(self.embedding_size, 64)),
+ nn.Tanh(),
+ )
+
+ # Define critic's model
+ self.critic = nn.Sequential(
+ init_tanh_(nn.Linear(self.embedding_size, 64)),
+ nn.Tanh(),
+ init_(nn.Linear(64, 1))
+ )
+
+ self.dist = Categorical(64, num_actions)
+
+ apply_init_(self.modules())
+
+ self.train()
+
+ @property
+ def is_recurrent(self):
+ return False
+
+ @property
+ def recurrent_hidden_state_size(self):
+ """Size of rnn_hx."""
+ return 1
+
+ def forward(self, inputs, rnn_hxs, masks):
+ raise NotImplementedError
+
+ def act(self, inputs, deterministic=False):
+ x = inputs
+ x = self.image_conv(x)
+ x = x.flatten(1, -1)
+ actor_features = self.actor_base(x)
+ value = self.critic(x)
+ dist = self.dist(actor_features)
+
+ if deterministic:
+ action = dist.mode()
+ else:
+ action = dist.sample()
+
+ # action_log_probs = dist.log_probs(action)
+ action_log_dist = dist.logits
+ dist_entropy = dist.entropy().mean()
+
+ return action, action_log_dist, dist_entropy, value
+
+ def get_value(self, inputs, rnn_hxs, masks):
+ x = inputs
+ x = self.image_conv(x)
+ x = x.flatten(1, -1)
+ return self.critic(x)
+
+ def evaluate_actions(self, inputs, rnn_hxs, masks, action):
+ x = inputs
+ x = self.image_conv(x)
+ x = x.flatten(1, -1)
+ actor_features = self.actor_base(x)
+ value = self.critic(x)
+ dist = self.dist(actor_features)
+
+ action_log_probs = dist.log_probs(action)
+ dist_entropy = dist.entropy().mean()
+
+ return value, action_log_probs, dist_entropy, rnn_hxs
+
+class MinigridAgentVerma(MinigridPolicyVerma):
+ def get_value(self, x):
+ x = self.image_conv(x)
+ x = x.flatten(1, -1)
+ return self.critic(x)
+
+ def get_action_and_value(self, x, action=None, full_log_probs=False):
+ x = self.image_conv(x)
+ x = x.flatten(1, -1)
+ actor_features = self.actor_base(x)
+ value = self.critic(x)
+ dist = self.dist(actor_features)
+
+        # Only sample a fresh action when none is provided; during the PPO update phase the
+        # actions stored in the rollout buffer must be evaluated rather than resampled.
+        if action is None:
+            action = torch.squeeze(dist.sample())
+
+ action_log_probs = torch.squeeze(dist.log_probs(action))
+ dist_entropy = dist.entropy()
+
+ if full_log_probs:
+ log_probs = torch.log(dist.probs)
+ return action, action_log_probs, dist_entropy, value, log_probs
+
+ return action, action_log_probs, dist_entropy, value
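+
+
+if __name__ == "__main__":
+    # Illustrative shape check (an editor-added sketch, not part of the original model): push a
+    # batch of dummy fully-observable MiniGrid frames (C=3, H=W=25) through the agent and print
+    # the shapes the training loop expects back.
+    obs_shape = (3, 25, 25)
+    agent = MinigridAgentVerma(obs_shape, num_actions=7, arch="large")
+    dummy_obs = torch.zeros((4,) + obs_shape)
+    action, log_prob, entropy, value = agent.get_action_and_value(dummy_obs)
+    print(action.shape, log_prob.shape, entropy.shape, value.shape)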
diff --git a/syllabus/examples/task_wrappers/minigrid_task_wrapper_verma.py b/syllabus/examples/task_wrappers/minigrid_task_wrapper_verma.py
new file mode 100644
index 00000000..cf440903
--- /dev/null
+++ b/syllabus/examples/task_wrappers/minigrid_task_wrapper_verma.py
@@ -0,0 +1,51 @@
+import gymnasium as gym
+import numpy as np
+from syllabus.core import TaskWrapper
+from syllabus.task_space import TaskSpace
+from gym_minigrid.wrappers import FullyObsWrapper, ImgObsWrapper
+from shimmy.openai_gym_compatibility import GymV21CompatibilityV0
+from gymnasium.spaces import Box
+
+class MinigridTaskWrapperVerma(TaskWrapper):
+ def __init__(self, env: gym.Env, env_id, seed=0):
+ super().__init__(env)
+ self.env.unwrapped.seed(seed)
+ self.task_space = TaskSpace(gym.spaces.Discrete(200), list(np.arange(0, 200)))
+ self.env_id = env_id
+ self.task = seed
+ self.episode_return = 0
+ m, n, c = self.env.observation_space.shape
+ self.observation_space = Box(
+ self.observation_space.low[0, 0, 0],
+ self.observation_space.high[0, 0, 0],
+ [c, m, n],
+ dtype=self.observation_space.dtype)
+
+ def observation(self, obs):
+ obs = obs.transpose(2, 0, 1)
+ return obs
+
+ def reset(self, new_task=None, **kwargs):
+ self.episode_return = 0.0
+ if new_task is not None:
+ self.change_task(new_task)
+ obs, info = self.env.reset(**kwargs)
+ return self.observation(obs), info
+
+ def change_task(self, new_task: int):
+ """
+ Change task by directly editing environment class.
+
+ Ignores requests for unknown tasks or task changes outside of a reset.
+ """
+ seed = int(new_task)
+ self.task = seed
+ self.seed(seed)
+
+ def seed(self, seed):
+ self.env.unwrapped.seed(int(seed))
+
+ def step(self, action):
+ obs, rew, term, trunc, info = self.env.step(action)
+ self.episode_return += rew
+ return self.observation(obs), rew, term, trunc, info
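+
+
+if __name__ == "__main__":
+    # Illustrative usage (an editor-added sketch, not part of the original wrapper): mirror the
+    # wrapping order used by the training script on a single environment. Assumes gym_minigrid,
+    # shimmy and an old-gym install are available, as the training script already requires.
+    import gym as openai_gym
+
+    base = openai_gym.make("MiniGrid-MultiRoom-N4-Random-v0")
+    base = FullyObsWrapper(base)
+    base = ImgObsWrapper(base)
+    base = GymV21CompatibilityV0(env=base)
+    env = MinigridTaskWrapperVerma(base, env_id="MiniGrid-MultiRoom-N4-Random-v0", seed=0)
+    obs, info = env.reset(new_task=3)  # the task id doubles as the environment seed
+    print(obs.shape, env.task, env.observation_space)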
diff --git a/syllabus/examples/training_scripts/.gitignore b/syllabus/examples/training_scripts/.gitignore
new file mode 100644
index 00000000..49e0461d
--- /dev/null
+++ b/syllabus/examples/training_scripts/.gitignore
@@ -0,0 +1,3 @@
+command.txt
+wandb
+requirements.txt
diff --git a/syllabus/examples/training_scripts/test_minigrid_wrapper.py b/syllabus/examples/training_scripts/test_minigrid_wrapper.py
new file mode 100644
index 00000000..80c19e78
--- /dev/null
+++ b/syllabus/examples/training_scripts/test_minigrid_wrapper.py
@@ -0,0 +1,553 @@
+import argparse
+import os, sys
+import random
+import time
+from collections import deque
+from distutils.util import strtobool
+
+import gym as openai_gym
+import gymnasium as gym
+import numpy as np
+import procgen # noqa: F401
+from procgen import ProcgenEnv
+import torch
+import torch.nn as nn
+import torch.optim as optim
+from shimmy.openai_gym_compatibility import GymV21CompatibilityV0
+from torch.utils.tensorboard import SummaryWriter
+
+from syllabus.core import MultiProcessingSyncWrapper, make_multiprocessing_curriculum
+from syllabus.curricula import CentralizedPrioritizedLevelReplay, DomainRandomization, LearningProgressCurriculum, SequentialCurriculum
+from syllabus.examples.models import ProcgenAgent, MinigridAgent
+from syllabus.examples.task_wrappers import ProcgenTaskWrapper
+from syllabus.examples.utils.vecenv import VecMonitor, VecNormalize, VecExtractDictObs
+
+from gym_minigrid.wrappers import FullyObsWrapper, ImgObsWrapper
+sys.path.append("/data/averma/MARL/Syllabus/syllabus/examples/task_wrappers")
+sys.path.append("/data/averma/MARL/Syllabus/syllabus/examples/models")
+from minigrid_model_verma import *
+from minigrid_task_wrapper_verma import *
+import torch.nn as nn
+
+
+def parse_args():
+ # fmt: off
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--exp-name", type=str, default=os.path.basename(__file__).rstrip(".py"),
+ help="the name of this experiment")
+ parser.add_argument("--seed", type=int, default=1,
+ help="seed of the experiment")
+ parser.add_argument("--torch-deterministic", type=lambda x: bool(strtobool(x)), default=True, nargs="?", const=True,
+ help="if toggled, `torch.backends.cudnn.deterministic=False`")
+ parser.add_argument("--cuda", type=lambda x: bool(strtobool(x)), default=True, nargs="?", const=True,
+ help="if toggled, cuda will be enabled by default")
+ parser.add_argument("--track", type=lambda x: bool(strtobool(x)), default=False, nargs="?", const=True,
+ help="if toggled, this experiment will be tracked with Weights and Biases")
+ parser.add_argument("--wandb-project-name", type=str, default="syllabus",
+ help="the wandb's project name")
+ parser.add_argument("--wandb-entity", type=str, default=None,
+ help="the entity (team) of wandb's project")
+ parser.add_argument("--capture-video", type=lambda x: bool(strtobool(x)), default=False, nargs="?", const=True,
+ help="weather to capture videos of the agent performances (check out `videos` folder)")
+ parser.add_argument("--logging-dir", type=str, default=".",
+ help="the base directory for logging and wandb storage.")
+
+ # Algorithm specific arguments
+ parser.add_argument("--env-id", type=str, default="starpilot",
+ help="the id of the environment")
+ parser.add_argument("--total-timesteps", type=int, default=int(25e6),
+ help="total timesteps of the experiments")
+ parser.add_argument("--learning-rate", type=float, default=5e-4,
+ help="the learning rate of the optimizer")
+ parser.add_argument("--num-envs", type=int, default=64,
+ help="the number of parallel game environments")
+ parser.add_argument("--num-steps", type=int, default=256,
+ help="the number of steps to run in each environment per policy rollout")
+ parser.add_argument("--anneal-lr", type=lambda x: bool(strtobool(x)), default=False, nargs="?", const=True,
+ help="Toggle learning rate annealing for policy and value networks")
+ parser.add_argument("--gae", type=lambda x: bool(strtobool(x)), default=True, nargs="?", const=True,
+ help="Use GAE for advantage computation")
+ parser.add_argument("--gamma", type=float, default=0.999,
+ help="the discount factor gamma")
+ parser.add_argument("--gae-lambda", type=float, default=0.95,
+ help="the lambda for the general advantage estimation")
+ parser.add_argument("--num-minibatches", type=int, default=8,
+ help="the number of mini-batches")
+ parser.add_argument("--update-epochs", type=int, default=3,
+ help="the K epochs to update the policy")
+ parser.add_argument("--norm-adv", type=lambda x: bool(strtobool(x)), default=True, nargs="?", const=True,
+ help="Toggles advantages normalization")
+ parser.add_argument("--clip-coef", type=float, default=0.2,
+ help="the surrogate clipping coefficient")
+ parser.add_argument("--clip-vloss", type=lambda x: bool(strtobool(x)), default=True, nargs="?", const=True,
+ help="Toggles whether or not to use a clipped loss for the value function, as per the paper.")
+ parser.add_argument("--ent-coef", type=float, default=0.01,
+ help="coefficient of the entropy")
+ parser.add_argument("--vf-coef", type=float, default=0.5,
+ help="coefficient of the value function")
+ parser.add_argument("--max-grad-norm", type=float, default=0.5,
+ help="the maximum norm for the gradient clipping")
+ parser.add_argument("--target-kl", type=float, default=None,
+ help="the target KL divergence threshold")
+
+ # Procgen arguments
+ parser.add_argument("--full-dist", type=lambda x: bool(strtobool(x)), default=True, nargs="?", const=True,
+ help="Train on full distribution of levels.")
+
+ # Curriculum arguments
+ parser.add_argument("--curriculum", type=lambda x: bool(strtobool(x)), default=False, nargs="?", const=True,
+ help="if toggled, this experiment will use curriculum learning")
+ parser.add_argument("--curriculum-method", type=str, default="plr",
+ help="curriculum method to use")
+ parser.add_argument("--num-eval-episodes", type=int, default=10,
+ help="the number of episodes to evaluate the agent on after each policy update.")
+
+ args = parser.parse_args()
+ args.batch_size = int(args.num_envs * args.num_steps)
+ args.minibatch_size = int(args.batch_size // args.num_minibatches)
+ # fmt: on
+ return args
+
+
+PROCGEN_RETURN_BOUNDS = {
+ "coinrun": (5, 10),
+ "starpilot": (2.5, 64),
+ "caveflyer": (3.5, 12),
+ "dodgeball": (1.5, 19),
+ "fruitbot": (-1.5, 32.4),
+ "chaser": (0.5, 13),
+ "miner": (1.5, 13),
+ "jumper": (3, 10),
+ "leaper": (3, 10),
+ "maze": (5, 10),
+ "bigfish": (1, 40),
+ "heist": (3.5, 10),
+ "climber": (2, 12.6),
+ "plunder": (4.5, 30),
+ "ninja": (3.5, 10),
+ "bossfight": (0.5, 13),
+}
+
+
+def make_env_minigrid(env_name, seed, curriculum=None):
+ def thunk():
+ env = openai_gym.make(env_name)
+ if curriculum is not None:
+ env = FullyObsWrapper(env)
+ env = ImgObsWrapper(env)
+ env = GymV21CompatibilityV0(env=env)
+ env = MinigridTaskWrapperVerma(env=env, env_id=env_name, seed=seed)
+ env = MultiProcessingSyncWrapper(
+ env,
+ curriculum.get_components(),
+ update_on_step=False,
+ task_space=env.task_space,
+ )
+ else:
+ env = GymV21CompatibilityV0(env=env)
+ return env
+
+ return thunk
+
+def wrap_vecenv(vecenv):
+ vecenv.is_vector_env = True
+ vecenv = VecMonitor(venv=vecenv, filename=None, keep_buf=100)
+ vecenv = VecNormalize(venv=vecenv, ob=False, ret=True)
+ return vecenv
+
+def level_replay_evaluate_minigrid(
+ env_name,
+ policy,
+ num_episodes,
+ device,
+ num_levels=0
+):
+ policy.eval()
+ eval_envs = gym.vector.AsyncVectorEnv(
+ [
+ make_env_minigrid(
+ env_name,
+ args.seed + i,
+ curriculum=curriculum if args.curriculum else None
+ )
+ # for i in range(args.num_envs)
+ for i in range(num_episodes)
+ ]
+ )
+ eval_envs = wrap_vecenv(eval_envs)
+ eval_obs, _ = eval_envs.reset()
+ eval_episode_rewards = [-1] * num_episodes
+
+ while -1 in eval_episode_rewards:
+ with torch.no_grad():
+ eval_action, _, _, _ = policy.get_action_and_value(torch.Tensor(eval_obs).to(device))
+
+ eval_obs, _, truncs, terms, infos = eval_envs.step(eval_action.cpu().numpy())
+ # len(infos) = 64
+ # num_episodes = 10
+ # print("info length: %d"%len(infos))
+ # print("num_episode length: %d"%num_episodes)
+ sys.stdout.flush()
+ for i, info in enumerate(infos):
+ if 'episode' in info.keys() and eval_episode_rewards[i] == -1:
+ eval_episode_rewards[i] = info['episode']['r']
+ print(f"level replay eval works! {eval_episode_rewards[i]}")
+
+ # print(eval_episode_rewards)
+ mean_returns = np.mean(eval_episode_rewards)
+ stddev_returns = np.std(eval_episode_rewards)
+ # env_min, env_max = PROCGEN_RETURN_BOUNDS[args.env_id]
+ env_min = 0
+ env_max = 1
+ normalized_mean_returns = (mean_returns - env_min) / (env_max - env_min)
+    eval_envs.close()
+    policy.train()
+    return mean_returns, stddev_returns, normalized_mean_returns
+
+
+def make_value_fn():
+ def get_value(obs):
+ obs = np.array(obs)
+ with torch.no_grad():
+ return agent.get_value(torch.Tensor(obs).to(device))
+ return get_value
+
+def print_values(obj):
+ describer = obj.__dict__
+ for key in describer.keys():
+ print(f"{key}: {describer[key]}")
+ print()
+
+
+if __name__ == "__main__":
+
+
+ args = parse_args()
+ env_name = "MiniGrid-MultiRoom-N4-Random-v0"
+ args.env_id = env_name
+ run_name = f"{args.env_id}__{args.exp_name}__{args.seed}__{int(time.time())}"
+ random.seed(args.seed)
+ np.random.seed(args.seed)
+ torch.manual_seed(args.seed)
+ torch.backends.cudnn.deterministic = args.torch_deterministic
+ device = torch.device("cuda" if torch.cuda.is_available() and args.cuda else "cpu")
+
+ print("Device:", device)
+
+ if args.track:
+ import wandb
+
+ wandb.init(
+ project=args.wandb_project_name,
+ entity=args.wandb_entity,
+ sync_tensorboard=True,
+ config=vars(args),
+ name=run_name,
+ monitor_gym=True,
+ save_code=True,
+ dir=args.logging_dir
+ )
+
+ # Curriculum setup
+ curriculum = None
+
+    writer = SummaryWriter(os.path.join(args.logging_dir, f"./runs/{run_name}"))
+ writer.add_text(
+ "hyperparameters",
+ "|param|value|\n|-|-|\n%s" % ("\n".join([f"|{key}|{value}|" for key, value in vars(args).items()])),
+ )
+
+ if args.curriculum:
+ print("args:\n--------------")
+ print(f"{args}\n-------------\n")
+
+ # sample_env = openai_gym.make(f"procgen-{args.env_id}-v0")
+ # sample_env = GymV21CompatibilityV0(env=sample_env)
+ # sample_env = ProcgenTaskWrapper(sample_env, args.env_id, seed=args.seed)
+
+ sample_env = openai_gym.make(env_name)
+ sample_env = FullyObsWrapper(sample_env)
+ sample_env = ImgObsWrapper(sample_env)
+ sample_env = GymV21CompatibilityV0(env=sample_env)
+ sample_env = MinigridTaskWrapperVerma(sample_env, args.env_id, seed=args.seed)
+
+ print(f"has curriculum: {args.curriculum}")
+
+ if args.curriculum_method == "plr":
+ print("Using prioritized level replay.")
+ task_sampler_kwargs_dict = {"strategy": "value_l1", "temperature":0.1, "staleness_coef":0.3}
+ curriculum = CentralizedPrioritizedLevelReplay(
+ sample_env.task_space,
+ num_steps=args.num_steps,
+ num_processes=args.num_envs,
+ gamma=args.gamma,
+ gae_lambda=args.gae_lambda,
+ task_sampler_kwargs_dict=task_sampler_kwargs_dict
+ )
+ # elif args.curriculum_method == "dr":
+ # print("Using domain randomization.")
+ # curriculum = DomainRandomization(sample_env.task_space)
+ # elif args.curriculum_method == "lp":
+ # print("Using learning progress.")
+ # curriculum = LearningProgressCurriculum(sample_env.task_space)
+ # elif args.curriculum_method == "sq":
+ # print("Using sequential curriculum.")
+ # curricula = []
+ # stopping = []
+ # for i in range(199):
+ # curricula.append(i + 1)
+ # stopping.append("steps>=50000")
+ # curricula.append(list(range(i + 1)))
+ # stopping.append("steps>=50000")
+ # curriculum = SequentialCurriculum(curricula, stopping[:-1], sample_env.task_space)
+ else:
+ raise ValueError(f"Unknown curriculum method {args.curriculum_method}")
+ curriculum = make_multiprocessing_curriculum(curriculum)
+ del sample_env
+
+ # env setup
+ print("Creating env")
+
+ envs = gym.vector.AsyncVectorEnv(
+ [
+ make_env_minigrid(
+ env_name,
+ args.seed + i,
+ curriculum=curriculum if args.curriculum else None
+ )
+ for i in range(args.num_envs)
+ ]
+ )
+ envs = wrap_vecenv(envs)
+ next_obs, _ = envs.reset()
+ assert isinstance(envs.single_action_space, gym.spaces.Discrete), "only discrete action space is supported"
+
+ agent = MinigridAgentVerma(
+ envs.single_observation_space.shape,
+ envs.single_action_space.n,
+ arch="large",
+ base_kwargs={'recurrent': False, 'hidden_size': 256}
+ ).to(device)
+ optimizer = optim.Adam(agent.parameters(), lr=args.learning_rate, eps=1e-5)
+
+
+ # ALGO Logic: Storage setup
+ obs = torch.zeros((args.num_steps, args.num_envs) + envs.single_observation_space.shape).to(device)
+ actions = torch.zeros((args.num_steps, args.num_envs) + envs.single_action_space.shape).to(device)
+ logprobs = torch.zeros((args.num_steps, args.num_envs)).to(device)
+ rewards = torch.zeros((args.num_steps, args.num_envs)).to(device)
+ dones = torch.zeros((args.num_steps, args.num_envs)).to(device)
+ values = torch.zeros((args.num_steps, args.num_envs)).to(device)
+
+ # TRY NOT TO MODIFY: start the game
+ global_step = 0
+ start_time = time.time()
+ next_obs, _ = envs.reset()
+ next_obs = torch.Tensor(next_obs).to(device)
+ next_done = torch.zeros(args.num_envs).to(device)
+ num_updates = args.total_timesteps // args.batch_size
+ episode_rewards = deque(maxlen=10)
+ completed_episodes = 0
+
+ for update in range(1, num_updates + 1):
+ # Annealing the rate if instructed to do so.
+ if args.anneal_lr:
+ frac = 1.0 - (update - 1.0) / num_updates
+ lrnow = frac * args.learning_rate
+ optimizer.param_groups[0]["lr"] = lrnow
+
+ for step in range(0, args.num_steps):
+ global_step += 1 * args.num_envs
+ obs[step] = next_obs
+ dones[step] = next_done
+
+ # ALGO LOGIC: action logic
+ with torch.no_grad():
+ action, logprob, _, value = agent.get_action_and_value(next_obs)
+ values[step] = value.flatten()
+ actions[step] = action
+ logprobs[step] = logprob
+
+ # TRY NOT TO MODIFY: execute the game and log data.
+ next_obs, reward, term, trunc, info = envs.step(action.cpu().numpy())
+ done = np.logical_or(term, trunc)
+ rewards[step] = torch.tensor(reward).to(device).view(-1)
+ next_obs, next_done = torch.Tensor(next_obs).to(device), torch.Tensor(done).to(device)
+ completed_episodes += sum(done)
+
+ for item in info:
+ if "episode" in item.keys():
+ episode_rewards.append(item['episode']['r'])
+ print(f"global_step={global_step}, episodic_return={item['episode']['r']}")
+ writer.add_scalar("charts/episodic_return", item["episode"]["r"], global_step)
+ writer.add_scalar("charts/episodic_length", item["episode"]["l"], global_step)
+ if curriculum is not None:
+ curriculum.log_metrics(writer, global_step)
+ break
+
+ # Syllabus curriculum update
+ if args.curriculum and args.curriculum_method == "plr":
+ with torch.no_grad():
+ next_value = agent.get_value(next_obs)
+ tasks = envs.get_attr("task")
+
+            plr_update = {
+                "update_type": "on_demand",
+                "metrics": {
+                    "value": value,
+                    "next_value": next_value,
+                    "rew": reward,
+                    "dones": done,
+                    "tasks": tasks,
+                },
+            }
+            curriculum.update(plr_update)
+
+ # bootstrap value if not done
+ with torch.no_grad():
+ next_value = agent.get_value(next_obs).reshape(1, -1)
+ if args.gae:
+ advantages = torch.zeros_like(rewards).to(device)
+ lastgaelam = 0
+ for t in reversed(range(args.num_steps)):
+ if t == args.num_steps - 1:
+ nextnonterminal = 1.0 - next_done
+ nextvalues = next_value
+ else:
+ nextnonterminal = 1.0 - dones[t + 1]
+ nextvalues = values[t + 1]
+ delta = rewards[t] + args.gamma * nextvalues * nextnonterminal - values[t]
+ advantages[t] = lastgaelam = delta + args.gamma * args.gae_lambda * nextnonterminal * lastgaelam
+ returns = advantages + values
+ else:
+ returns = torch.zeros_like(rewards).to(device)
+ for t in reversed(range(args.num_steps)):
+ if t == args.num_steps - 1:
+ nextnonterminal = 1.0 - next_done
+ next_return = next_value
+ else:
+ nextnonterminal = 1.0 - dones[t + 1]
+ next_return = returns[t + 1]
+ returns[t] = rewards[t] + args.gamma * nextnonterminal * next_return
+ advantages = returns - values
+
+ # flatten the batch
+ b_obs = obs.reshape((-1,) + envs.single_observation_space.shape)
+ b_logprobs = logprobs.reshape(-1)
+ b_actions = actions.reshape((-1,) + envs.single_action_space.shape)
+ b_advantages = advantages.reshape(-1)
+ b_returns = returns.reshape(-1)
+ b_values = values.reshape(-1)
+
+ # Optimizing the policy and value network
+ b_inds = np.arange(args.batch_size)
+ clipfracs = []
+ for epoch in range(args.update_epochs):
+ np.random.shuffle(b_inds)
+ for start in range(0, args.batch_size, args.minibatch_size):
+ end = start + args.minibatch_size
+ mb_inds = b_inds[start:end]
+
+ _, newlogprob, entropy, newvalue = agent.get_action_and_value(b_obs[mb_inds], b_actions.long()[mb_inds])
+ logratio = newlogprob - b_logprobs[mb_inds]
+ ratio = logratio.exp()
+
+ with torch.no_grad():
+ # calculate approx_kl http://joschu.net/blog/kl-approx.html
+ old_approx_kl = (-logratio).mean()
+ approx_kl = ((ratio - 1) - logratio).mean()
+ clipfracs += [((ratio - 1.0).abs() > args.clip_coef).float().mean().item()]
+
+ mb_advantages = b_advantages[mb_inds]
+ if args.norm_adv:
+ mb_advantages = (mb_advantages - mb_advantages.mean()) / (mb_advantages.std() + 1e-8)
+
+ # Policy loss
+ pg_loss1 = -mb_advantages * ratio
+ pg_loss2 = -mb_advantages * torch.clamp(ratio, 1 - args.clip_coef, 1 + args.clip_coef)
+ pg_loss = torch.max(pg_loss1, pg_loss2).mean()
+
+ # Value loss
+ newvalue = newvalue.view(-1)
+ if args.clip_vloss:
+ v_loss_unclipped = (newvalue - b_returns[mb_inds]) ** 2
+ v_clipped = b_values[mb_inds] + torch.clamp(
+ newvalue - b_values[mb_inds],
+ -args.clip_coef,
+ args.clip_coef,
+ )
+ v_loss_clipped = (v_clipped - b_returns[mb_inds]) ** 2
+ v_loss_max = torch.max(v_loss_unclipped, v_loss_clipped)
+ v_loss = 0.5 * v_loss_max.mean()
+ else:
+ v_loss = 0.5 * ((newvalue - b_returns[mb_inds]) ** 2).mean()
+
+ entropy_loss = entropy.mean()
+ loss = pg_loss - args.ent_coef * entropy_loss + v_loss * args.vf_coef
+
+ optimizer.zero_grad()
+ loss.backward()
+ nn.utils.clip_grad_norm_(agent.parameters(), args.max_grad_norm)
+ optimizer.step()
+
+ if args.target_kl is not None:
+ if approx_kl > args.target_kl:
+ break
+
+ y_pred, y_true = b_values.cpu().numpy(), b_returns.cpu().numpy()
+ var_y = np.var(y_true)
+ explained_var = np.nan if var_y == 0 else 1 - np.var(y_true - y_pred) / var_y
+
+ # Evaluate agent
+ mean_eval_returns, stddev_eval_returns, normalized_mean_eval_returns = level_replay_evaluate_minigrid(
+ args.env_id, agent, args.num_eval_episodes, device, num_levels=0
+ )
+ mean_train_returns, stddev_train_returns, normalized_mean_train_returns = level_replay_evaluate_minigrid(
+ args.env_id, agent, args.num_eval_episodes, device, num_levels=200
+ )
+
+ # TRY NOT TO MODIFY: record rewards for plotting purposes
+ writer.add_scalar("charts/learning_rate", optimizer.param_groups[0]["lr"], global_step)
+ writer.add_scalar("charts/episode_returns", np.mean(episode_rewards), global_step)
+ writer.add_scalar("losses/value_loss", v_loss.item(), global_step)
+ writer.add_scalar("losses/policy_loss", pg_loss.item(), global_step)
+ writer.add_scalar("losses/entropy", entropy_loss.item(), global_step)
+ writer.add_scalar("losses/old_approx_kl", old_approx_kl.item(), global_step)
+ writer.add_scalar("losses/approx_kl", approx_kl.item(), global_step)
+ writer.add_scalar("losses/clipfrac", np.mean(clipfracs), global_step)
+ writer.add_scalar("losses/explained_variance", explained_var, global_step)
+ print("SPS:", int(global_step / (time.time() - start_time)))
+ writer.add_scalar("charts/SPS", int(global_step / (time.time() - start_time)), global_step)
+
+ writer.add_scalar("test_eval/mean_episode_return", mean_eval_returns, global_step)
+ writer.add_scalar("test_eval/normalized_mean_eval_return", normalized_mean_eval_returns, global_step)
+ writer.add_scalar("test_eval/stddev_eval_return", stddev_eval_returns, global_step)
+
+ writer.add_scalar("train_eval/mean_episode_return", mean_train_returns, global_step)
+ writer.add_scalar("train_eval/normalized_mean_train_return", normalized_mean_train_returns, global_step)
+ writer.add_scalar("train_eval/stddev_train_return", stddev_train_returns, global_step)
+
+ writer.add_scalar("curriculum/completed_episodes", completed_episodes, step)
+
+ envs.close()
+ writer.close()
diff --git a/tests/cleanrl_cartpole_test.sh b/tests/cleanrl_cartpole_test.sh
old mode 100755
new mode 100644