From 4889205375d291cd492c5c1b722493ae63ab235c Mon Sep 17 00:00:00 2001 From: nikhilrajaram Date: Mon, 17 Dec 2018 22:53:25 -0700 Subject: [PATCH] edit readme, include output file --- Network and Training.ipynb | 537 ++++++++++++++++++++++++ README.md | 6 + data/.DS_Store | Bin 14340 -> 12292 bytes data/output/jingle-bells-pred-train.mid | Bin 0 -> 11593 bytes 4 files changed, 543 insertions(+) create mode 100644 Network and Training.ipynb create mode 100644 data/output/jingle-bells-pred-train.mid diff --git a/Network and Training.ipynb b/Network and Training.ipynb new file mode 100644 index 0000000..9c22ac1 --- /dev/null +++ b/Network and Training.ipynb @@ -0,0 +1,537 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "jyLwghAJz7KB" + }, + "outputs": [], + "source": [ + "import os\n", + "os.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\"\n", + "os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\" \n", + "import midi\n", + "import numpy as np\n", + "import pandas as pd\n", + "import matplotlib.pyplot as plt\n", + "import copy\n", + "import keras\n", + "import warnings\n", + "import time\n", + "import random\n", + "import itertools\n", + "import pickle\n", + "import keras\n", + "from keras.models import Sequential\n", + "from keras.layers import Dense, TimeDistributed, LSTM, Dropout, CuDNNLSTM, Embedding, Input, Conv1D\n", + "from sklearn.preprocessing import MinMaxScaler, RobustScaler, StandardScaler" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "7xm5Xp8uudy-" + }, + "outputs": [], + "source": [ + "mid_jingle = midi.read_midifile('data/jingle-bells-guitar-glenn-jarrett.mid')\n", + "mid_jingle.make_ticks_abs()" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "fOhQgAAfpqZj" + }, + "outputs": [], + "source": [ + "def get_max_tick(track):\n", + " '''\n", + " track: list of MIDI events\n", + " \n", + " returns: last MIDI tick in track\n", + " '''\n", + " max_tick = None\n", + " \n", + " for event in reversed(track):\n", + " if type(event) in [midi.NoteOnEvent, midi.NoteOffEvent]:\n", + " max_tick = event.tick\n", + " break\n", + " \n", + " return max_tick\n", + "\n", + "def find_pitch(notes, pitch):\n", + " '''\n", + " notes: list/set of (pitch, velocity) tuples\n", + " pitch: pitch value to find\n", + " \n", + " returns: first (pitch, velocity) tuple that matches input pitch\n", + " '''\n", + " \n", + " for note in notes:\n", + " if note[0] == pitch:\n", + " return note\n", + " \n", + " print(notes, pitch)\n", + " \n", + "def parse_data(track):\n", + " '''\n", + " track: list of MIDI events\n", + " \n", + " returns: time series list of lists, each list fixed length,\n", + " contains activated notes at corresponding tick\n", + " '''\n", + " events = []\n", + " activated_notes = set([])\n", + " max_tick = get_max_tick(track)\n", + " \n", + " if max_tick is None:\n", + " raise ValueError()\n", + " \n", + " note_starts = {}\n", + " note_ends = {}\n", + " \n", + " for event in track:\n", + " if type(event) not in [midi.NoteOnEvent, midi.NoteOffEvent]:\n", + " continue\n", + " \n", + " tick = event.tick\n", + " pitch, velocity = event.data\n", + " \n", + " if velocity != 0:\n", + " try:\n", + " note_starts[tick].add((pitch, velocity))\n", + " except KeyError:\n", + " note_starts[tick] = set([(pitch, velocity)])\n", + " finally:\n", + " if (pitch, velocity) in activated_notes:\n", + 
" print((pitch, velocity), activated_notes)\n", + " velocity += np.random.choice([-1, 1])\n", + " \n", + " activated_notes.add((pitch, velocity))\n", + "\n", + " else:\n", + " pitch, velocity = find_pitch(activated_notes, pitch)\n", + " \n", + " try:\n", + " note_ends[tick].add((pitch, velocity))\n", + " except KeyError:\n", + " note_ends[tick] = set([(pitch, velocity)])\n", + " finally:\n", + " activated_notes.remove((pitch, velocity))\n", + " \n", + " for tick in range(max_tick):\n", + " try:\n", + " for note in note_starts[tick]:\n", + " activated_notes.add(note)\n", + " except KeyError:\n", + " pass\n", + " \n", + " try:\n", + " for note in note_ends[tick]:\n", + " activated_notes.remove(note)\n", + " except KeyError:\n", + " pass\n", + " \n", + " events.append(list(activated_notes))\n", + " \n", + " return keras.preprocessing.sequence.pad_sequences(events)\n", + "\n", + "def process_data(data, timestep):\n", + " '''\n", + " data: time series MIDI data\n", + " timestep: specifies length of convolution\n", + " \n", + " returns: 1-D convolution of time series data with window \n", + " specified by timestep\n", + " '''\n", + " X, y = [], []\n", + " for i in range(len(data)-timestep-1):\n", + " X.append(np.array([data[i:(i+timestep)]]))\n", + " y.append(np.array([data[(i+timestep)]]))\n", + " \n", + " X, y = np.array(X), np.array(y)\n", + " return X.reshape(*[_ for _ in X.shape if _ != 1]), \\\n", + " y.reshape(*[_ for _ in y.shape if _ != 1])" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "LNRepQCkwmz9" + }, + "outputs": [], + "source": [ + "class MidiScaler():\n", + " '''\n", + " Custom scaler for MIDI time series data\n", + " '''\n", + " def __init__(self):\n", + " pass\n", + " \n", + " def fit(self, data):\n", + " pass\n", + " \n", + " def fit_transform(self, data):\n", + " return ((data-64)/128).astype(np.float128)\n", + " \n", + " def transform(self, data):\n", + " return ((data-64)/128).astype(np.float128)\n", + " \n", + " def inverse_transform(self, data):\n", + " return ((data*128)+64).astype(np.float128)" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "ugNPvma2wn6s" + }, + "outputs": [], + "source": [ + "class RNN:\n", + " '''\n", + " Custom RNN class/data container\n", + " '''\n", + " def __init__(self, X, train_test_split=0.8, epochs=100, batch_size=32, lstm_units=128, timestep=256):\n", + " self.X = X\n", + " self.split = int(self.X.shape[0]*train_test_split)\n", + " self.X_train, self.X_test = self.X[:self.split], self.X[self.split:]\n", + " self.epochs = epochs\n", + " self.batch_size = batch_size\n", + " self.lstm_units = lstm_units\n", + " self.timestep = timestep\n", + " self.scaler = MidiScaler()\n", + " self.scale_data()\n", + " \n", + " self.model = Sequential()\n", + " self.model.add(\n", + " CuDNNLSTM(self.lstm_units, input_shape=(\n", + " self.timestep, self.X_train_processed.shape[-1]\n", + " ), \n", + " return_sequences=True)\n", + " )\n", + " self.model.add(Dropout(0.5))\n", + " self.model.add(CuDNNLSTM(self.lstm_units))\n", + " self.model.add(Dropout(0.5))\n", + " self.model.add(Dense(self.lstm_units, activation='relu'))\n", + " self.model.add(Dense(self.X_train_processed.shape[-1], activation='softmax'))\n", + " self.model.compile(optimizer='adam', loss=['mse'], metrics=['accuracy'])\n", + " \n", + " def flatten_data(self):\n", + " try:\n", + " self.X_train_flattened = self.X_train[:,:,0]\n", + " 
self.X_test_flattened = self.X_test[:,:,0]\n", + " except IndexError:\n", + " self.X_train_flattened = self.X_train\n", + " self.X_test_flattened = self.X_test\n", + " \n", + " def scale_data(self):\n", + " self.flatten_data()\n", + " self.scaler.fit(self.X_train_flattened)\n", + " self.X_train_scaled = self.scaler.transform(self.X_train_flattened)\n", + " self.X_test_scaled = self.scaler.transform(self.X_test_flattened)\n", + " \n", + " self.X_train_processed, self.y_train_processed = \\\n", + " process_data(self.X_train_scaled, self.timestep)\n", + " self.X_test_processed, self.y_test_processed = \\\n", + " process_data(self.X_test_scaled, self.timestep)\n", + " \n", + " def train(self, epochs, validation_split=0.25, verbose=0):\n", + " history = self.model.fit(self.X_train_processed, self.y_train_processed,\n", + " batch_size=self.batch_size, epochs=epochs,\n", + " validation_split=validation_split)\n", + "\n", + " return history" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "dj9mGuF1YpMY" + }, + "outputs": [], + "source": [ + "def detokenize_data(pred):\n", + " '''\n", + " pred: predicted MIDI time series data\n", + " \n", + " returns: corresponding MIDI pattern\n", + " '''\n", + " pattern = midi.Pattern()\n", + " track = midi.Track()\n", + " prev = []\n", + "\n", + " for tick, note_arr in enumerate(pred[:-1].tolist()):\n", + " for note in note_arr:\n", + " if note != 0: \n", + " if note in prev:\n", + " if note not in pred[tick+1]:\n", + " track.append(midi.NoteOffEvent(tick=tick, channel=10, data=[note, 0]))\n", + " else:\n", + " if note in pred[tick+1]:\n", + " track.append(midi.NoteOnEvent(tick=tick, channel=10, data=[note, 60]))\n", + " else:\n", + " track.append(midi.NoteOnEvent(tick=tick, channel=10, data=[note, 60]))\n", + " track.append(midi.NoteOffEvent(tick=tick, channel=10, data=[note, 0]))\n", + "\n", + " prev = note_arr\n", + "\n", + " for i, event in reversed(list(enumerate(track))):\n", + " if i == 0:\n", + " continue\n", + "\n", + " event.tick = (event.tick - track[i-1].tick)\n", + "\n", + " pattern.append(track)\n", + " \n", + " return pattern" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "P-nlqvepSVRn" + }, + "outputs": [], + "source": [ + "track_data = parse_data(mid_jingle[1])" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "ayoFg5k6wqvB" + }, + "outputs": [], + "source": [ + "rnn = RNN(track_data)" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 357 + }, + "colab_type": "code", + "id": "VQakuhnx7t2r", + "outputId": "5348e975-2295-4652-d1d6-6d3cbf3b73ef" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "_________________________________________________________________\n", + "Layer (type) Output Shape Param # \n", + "=================================================================\n", + "cu_dnnlstm_3 (CuDNNLSTM) (None, 256, 128) 70144 \n", + "_________________________________________________________________\n", + "dropout_3 (Dropout) (None, 256, 128) 0 \n", + "_________________________________________________________________\n", + "cu_dnnlstm_4 (CuDNNLSTM) (None, 128) 132096 \n", + "_________________________________________________________________\n", + "dropout_4 (Dropout) (None, 128) 0 \n", + 
"_________________________________________________________________\n", + "dense_3 (Dense) (None, 128) 16512 \n", + "_________________________________________________________________\n", + "dense_4 (Dense) (None, 7) 903 \n", + "=================================================================\n", + "Total params: 219,655\n", + "Trainable params: 219,655\n", + "Non-trainable params: 0\n", + "_________________________________________________________________\n", + "None\n" + ] + } + ], + "source": [ + "print(rnn.model.summary())" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 714 + }, + "colab_type": "code", + "id": "uH-iiwtb__e7", + "outputId": "52686753-5411-4c94-b430-774eff939026" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Train on 22770 samples, validate on 7590 samples\n", + "Epoch 1/20\n", + "22770/22770 [==============================] - 72s 3ms/step - loss: 0.1930 - acc: 0.6531 - val_loss: 0.1622 - val_acc: 0.4864\n", + "Epoch 2/20\n", + "22770/22770 [==============================] - 68s 3ms/step - loss: 0.1915 - acc: 0.8441 - val_loss: 0.1618 - val_acc: 0.6331\n", + "Epoch 3/20\n", + "22770/22770 [==============================] - 68s 3ms/step - loss: 0.1914 - acc: 0.8711 - val_loss: 0.1615 - val_acc: 0.7838\n", + "Epoch 4/20\n", + "22770/22770 [==============================] - 68s 3ms/step - loss: 0.1914 - acc: 0.8899 - val_loss: 0.1616 - val_acc: 0.6556\n", + "Epoch 5/20\n", + "22770/22770 [==============================] - 68s 3ms/step - loss: 0.1913 - acc: 0.8966 - val_loss: 0.1614 - val_acc: 0.7560\n", + "Epoch 6/20\n", + "22770/22770 [==============================] - 68s 3ms/step - loss: 0.1913 - acc: 0.9018 - val_loss: 0.1615 - val_acc: 0.5750\n", + "Epoch 7/20\n", + "22770/22770 [==============================] - 68s 3ms/step - loss: 0.1913 - acc: 0.9054 - val_loss: 0.1615 - val_acc: 0.7885\n", + "Epoch 8/20\n", + "22770/22770 [==============================] - 68s 3ms/step - loss: 0.1913 - acc: 0.9079 - val_loss: 0.1617 - val_acc: 0.6889\n", + "Epoch 9/20\n", + "22770/22770 [==============================] - 68s 3ms/step - loss: 0.1913 - acc: 0.9139 - val_loss: 0.1615 - val_acc: 0.6250\n", + "Epoch 10/20\n", + "22770/22770 [==============================] - 68s 3ms/step - loss: 0.1913 - acc: 0.9209 - val_loss: 0.1620 - val_acc: 0.6630\n", + "Epoch 11/20\n", + "22770/22770 [==============================] - 70s 3ms/step - loss: 0.1912 - acc: 0.9283 - val_loss: 0.1620 - val_acc: 0.6914\n", + "Epoch 12/20\n", + "22770/22770 [==============================] - 69s 3ms/step - loss: 0.1912 - acc: 0.9270 - val_loss: 0.1620 - val_acc: 0.5444\n", + "Epoch 13/20\n", + "22770/22770 [==============================] - 69s 3ms/step - loss: 0.1912 - acc: 0.9296 - val_loss: 0.1619 - val_acc: 0.6246\n", + "Epoch 14/20\n", + "22770/22770 [==============================] - 69s 3ms/step - loss: 0.1912 - acc: 0.9307 - val_loss: 0.1618 - val_acc: 0.6246\n", + "Epoch 15/20\n", + "22770/22770 [==============================] - 69s 3ms/step - loss: 0.1912 - acc: 0.9334 - val_loss: 0.1619 - val_acc: 0.6577\n", + "Epoch 16/20\n", + "22770/22770 [==============================] - 69s 3ms/step - loss: 0.1912 - acc: 0.9389 - val_loss: 0.1620 - val_acc: 0.6242\n", + "Epoch 17/20\n", + "22770/22770 [==============================] - 69s 3ms/step - loss: 0.1912 - acc: 0.9417 - val_loss: 0.1622 - val_acc: 0.6586\n", + "Epoch 18/20\n", + "22770/22770 
[==============================] - 69s 3ms/step - loss: 0.1912 - acc: 0.9344 - val_loss: 0.1621 - val_acc: 0.6253\n",
+ "Epoch 19/20\n",
+ "22770/22770 [==============================] - 69s 3ms/step - loss: 0.1912 - acc: 0.9434 - val_loss: 0.1621 - val_acc: 0.6252\n",
+ "Epoch 20/20\n",
+ "22770/22770 [==============================] - 69s 3ms/step - loss: 0.1912 - acc: 0.9460 - val_loss: 0.1620 - val_acc: 0.7190\n"
+ ]
+ }
+ ],
+ "source": [
+ "history = rnn.train(epochs=20, verbose=1)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {},
+ "colab_type": "code",
+ "id": "d9tC9sZLMJfe"
+ },
+ "outputs": [],
+ "source": [
+ "rnd = np.vectorize(round)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {},
+ "colab_type": "code",
+ "id": "Zq9anpm1YztV"
+ },
+ "outputs": [],
+ "source": [
+ "scaled_pred = rnn.model.predict(rnn.X_train_processed)\n",
+ "pred = rnn.scaler.inverse_transform(scaled_pred)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {},
+ "colab_type": "code",
+ "id": "s1m30PQ_uxnF"
+ },
+ "outputs": [],
+ "source": [
+ "pattern = detokenize_data(rnd(pred).astype(int))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {},
+ "colab_type": "code",
+ "id": "bwR1X4R_BIvr"
+ },
+ "outputs": [],
+ "source": [
+ "midi.write_midifile('data/output/jingle-bells-pred-train.mid', pattern)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {},
+ "colab_type": "code",
+ "id": "uy4bOFcuNd9E"
+ },
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "accelerator": "GPU",
+ "colab": {
+ "collapsed_sections": [],
+ "name": "Copy of Untitled1.ipynb",
+ "provenance": [],
+ "version": "0.3.2"
+ },
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.6.5"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 1
+}
diff --git a/README.md b/README.md
index 3cd6ff4..9fd6a68 100644
--- a/README.md
+++ b/README.md
@@ -1 +1,7 @@
 # music-gen-tool
+
+The `Exploring MIDI` notebook walks through our process of wrangling the data and experimenting with the `python-midi` library we used.
+
+The `Network and Training` notebook delves into our implementation of the tokenizer and RNN.
+
+We have several MIDI songs under the `data` folder, as well as a sample output file in the `data/output` folder.
diff --git a/data/.DS_Store b/data/.DS_Store
index 30e8580f923c48a077472f6fcb19f1e410b1e52a..5665329afc5777d59c2b02f65d114d49821fc3aa 100644
GIT binary patch
delta 306
zcmZoEXh~3DU|?W$DortDV9)?EIe-{M3-ADmb_NCo?uiQej1Ch6awh9)2uyZVwb;zU
z$j;BGGMQ0BXL2S`2uM%d!!ub?P-3%+z#PVUHimqLQic+S0uY&#ZWx@LpIZPl3O?$~0yfphc_@7in+H&pesm#FvK=5+Dpf+F)~r$sA??
Dkk&qx delta 183 zcmZokXem%&U|?W$DortDU@!nOIe-{M3-ADmb_NCoo{0+jj5ZSka(K#ui}G^v^U{G5 zjFZoc*=%NEWanp;+uR_qg>my1p>W>G3XESEWheJ5ifvX>U}j-AH8#*uFf^HLFYdHC zPh}eO#L1QuC+JMJ5D?g$D>;u1sQiN@E6_9`FyICfuE^GI7G!+SJegm|mxl@BDhrUh J%^5m#m;pPyCcppy diff --git a/data/output/jingle-bells-pred-train.mid b/data/output/jingle-bells-pred-train.mid new file mode 100644 index 0000000000000000000000000000000000000000..7754cce437e0166dc23bbf286bfaada022afca5b GIT binary patch literal 11593 zcmeI2$#zu95r(TIX!qcS7k&+|ysZTwnr<4z49(bpS`1LOq)w#@`o_ zk+&p)kL^XTe01(f-FtJ0jQHb^h`hCY=Jx7nH2P{Z=I@`&XP(?0jsEuc(d@uv_w|ft z)5(X?)`8LP>lx3cqw(zUWVCg7G}_f3<@r&bAEG@(dy4iH?T5*D_7fBDypHhx2y=d5 z&dteqYm=5|aed&O&!>~km(j_Nb^XNuKk;&j&IjpukVV{0{a*sy<=tKS?WbL$&r{l` z+&`y%J{oWB=NUWRde-K8iV#bqh3NBavKY^6%(KbnlX$>nvlk3{!Ju{8b=nPDHS>P5 z`8rg`%+B7g4gP;mhkXpLYnA^?Ww)5%7SgQIuF>x%?M?1~ru~`rD(zL~SmoWXjJv@7 z1)g7_y+V5n$$!oMvtM}j3w>|Vmo?_cGw*nIo@eLjYrmWHVXoPY$#i3DAA{-wF)k1j z-Jnz14%_$tefEh-KJorH)b$(fr^)msw$VP@F8~uAYxlNlz^f#>#$?x+q_k1Hyv*(p zNUh8+vI8{WZnrMe*BvVCMc!Y@ckD=Q_-?x1cjV6Oh+1Q3SLjf?vTbPW&ptBoNBX#9 zuiqyNvmbf(<7oQ&v%7PLzcXOx4x6-nm)Du(I{liBV`mpx;4;tM(GA)g?54)0jeGxD zju#W5so6DltJZvek=EU7``eT0^R3$mC2%eP_v@je1PqIH|2)IMGYk+|g5om$j?o^Y zJr}AB9Lz59ewpXX^wn&D5&-YT=hHk#5C|YWiv=&SsQ!A6m+q#dW;bD{HQKU!?sPm$ zK}4e(7|+hqXB7d?GLNRyWc2YpfyJUqk20w8tV-D!mhR#=n?96YOg)Ep{0M|+Ov z>}U%?6(n|Zo*k6S75D&LjM@HtjAjCix>C%A+F1|4={8CXB1`qNdn|O1h3?bdr%i;9 zXAil5$URKe@Gzb|;(yfmX6q3XsD{=FDjPaIVBiDpQKMSu#K8g>ex%) z3Q6VxlHQ!`L7*{xrca$X+~NTbO49Pd`*f~F?l9q<805sYP>f-LJ%PHLlj#e$=yro% za*(U0#3|k2AF;58e=Ngj@W(tTo^%o=Luw|PzIC6W zKgqNlYM3=Ui}xp7b9!nz3B3iO-a~>FR|HO5Pq8Zm(Vz;J_ zl^Tg(Zj*QpYY<*~C6o?M19w79_t&|=>lljn`JT8J^U{FcdO_=oMM0`ZHY(^=E~9)7 zk2ga3e=H8g(qhM=*1SKNz6xM9RD-Pqr0!ITFT_8bEJ}MX^{I%X*}+-HVUgQyu*9L} z8OS7G;VxPO>Jqt1EbpL7+ZG>~; z=^>S&TCS?FU;LGL8AFQ6U*h9Z;6k$}S6%M-SlEsWa|Joe*i(35`;A5h`nkiTiSXno zbc<{KIUEzGd=w7(Xfk~lj`s%=*I=#Umge!nkV&~;c3L^541yJx}mf-!=m9)%VRnE*}N4e5Je*s+76n1Dqk1aOvAQWxOG`Hd9VjPL^d%IZ@T-~{I&Bon%;W)m{@>D*Q9PRhq-l@xR@O$P6F4fdN0d~*a9Nm_-_3P71f zR60^9+zDyR2Z11@rC7@c*^hMPRd~IS*7FNZHG%bssmD&#>3oFcU_N8|;B+7UH>WFt zs+}ZBDdj?^B^O+sY?xsLyINH36u5EuE`eo)6nLz#MGWDKnNKVRN!&B}wgy?5e0sOd z`Q&eT+^Zl-3~%Xw&q*Vu^FI8+c%E@S3m~)d7ULuDy*l}(pmaa{VTkp{{&~J`^jwB2 zJ=OlTa2YMzOz$2LmMf!Erv%FkO)^>=9CX_zV3IvNhw1*G3xe+L@gM&U|EzrR-{yki zq^Rk0xuEnTg<)n(4r-8~8DS9lqC9#PBVE%N;=+Q-eDE3BC?eUP*(sQSyI3VIB+ijCS;Vih;OYovxqz3+v8M}F_EjE+=C{mjJa($%4HoRX z%R{?yWHZ0gM@y+>6KyTVaYhK{;y5M(nf)wA*x=~uiSBHNSiCax;#&1QSZ;xS1wIAE z)#sy|Cp9H?r9>&c#!5AkXt)a`7;5uOfwD|eN>7v2gud0@MFGPB9VEBdc82k(6@qI_ zYYbGCGgnTPs?jEcS_OsbOjUZO6&%&UovvkCl!rBvVdJaFlH7n*ltI%ou?&#o5crBD zAm}APODhN0giSNSp85YElnL8)Cbe$@Eh>aM!0WcYZmuR8J=x2N`>_*?4imCKpe~`j zI2IV(yMYZ0Km5QMm@Oeu%!l7Sg%0^T@VxgZRt>e$Z}$kF#F_j zpzrYP@Qu%w5(ntO$*S3me$peAo||5tnSLhG`Ava{71@4!*0Z1!2xYWeVy-3T^7zl{ z;ROHdJI8r{koFj@QSCT_8O-#jE0a$f;iw0B;E9Kq=Y>+_4|M*4Njy9BI>FT_lS9rp zXLON%wFt?Au}D8C6*S8{TV}3f{9jdu0-fafNuD3&IrJ66xpvj@ojhX!GofBXkHZR50x`t5P5l#$Cp+$93!mTAu0~YTdxVvkzoo*6;Ybj zn)Zem`XfU9h;S8C1(TOv;-=LE=J6YbpC|(80<|<>arX%C!C9XtW5QVc3Gf!_;`>N| zy;W5)rJ?1;cg<*Lud1J1qtZxmQEnRe{P_Irtyr`HOt%Z}-n;C?Opz*0LtgOuGr@u(Amf>gCx{tSvOT1luB_IqvC z9aJSXrG|2fo>YoTt@UIEygi}-#tOI=i)bv1k6$L=wkR>Z@7ZiXud2-@1|6m&0lxjn zrJxiL3>ccCzogGg+9``4V9W;h8%&y z-}$WIc#9VS!HA1y9&)7g^F1xWzK$MSn;dD5*&IKT3U>?=wmG8CXclz149Q^fx+})B2 z`YbTcdS@?W<$kAYL7l=}`nA=ciYT;-SJgmeu~$s4rqz5|hVtSIr13=y-3ebSqF|wz zp!}<1z(Jl{gFDx7uOKbF+*Nug>cEpYLxiX3`^ImfXqFbbZ{Majh z;N_;YrigfoV%i+$t^I$somp2RKueoP7uuM!=L#ant8$P0w2>7^<;l+1P-C=~;SCQV zgGX!_2?EsedD_EwuP_=h>`uVKB3Vs@!(vOnT%i8*Hg^nVw7d*rN 
z{NY-Z*Xw*V$RDpwA8(`m4HnixjUQ;Q6G3U?5Q`n+-5Tv0ZE225B(&;|-C_C23im7A z&-qwr&>Y2lrou&|csJdHZH+Z+D9aWK8cKrQE6{_YKue8FX5AI!!BOfl7PT-a`c@+524C-sMAH=Pq<<*)G40qP3?AdQ&I8LyPm^ET*wq%K9 e6a%J=!*2}W<1F(}P5#pO_)nfV5tsf76Z{MEic}#0 literal 0 HcmV?d00001