diff --git a/pywatts/board.py b/pywatts/board.py
index bd951dc..dadb303 100644
--- a/pywatts/board.py
+++ b/pywatts/board.py
@@ -2,7 +2,7 @@ import tensorflow as tf
 import subprocess
 
 writer = tf.summary.FileWriter("tensorboard")
-checkpoint = tf.train.get_checkpoint_state('tf_pywatts_model')
+checkpoint = tf.train.get_checkpoint_state('tf_pywatts_model_best')
 with tf.Session() as sess:
     saver = tf.train.import_meta_graph(checkpoint.model_checkpoint_path + '.meta')
     saver.restore(sess, checkpoint.model_checkpoint_path)
diff --git a/pywatts/kcross.py b/pywatts/kcross.py
index 6489c37..ed05c79 100644
--- a/pywatts/kcross.py
+++ b/pywatts/kcross.py
@@ -16,7 +16,7 @@ def split(data, k):
     data_list = data['dc'].tolist()
 
     # Each sample has 337 elements
-    samples = [data_list[i:i+337] for i in range(0, len(data_list) - 337, 20)]
+    samples = [data_list[i:i+337] for i in range(0, len(data_list) - 337, 30)]
 
     # Randomly shuffle samples
     random.shuffle(samples)
@@ -42,13 +42,13 @@ def split(data, k):
     return X_train, y_train, X_eval, y_eval
 
 
-def train(nn, X_train, y_train, X_eval, y_eval, steps=10):
+def train(nn, X_train, y_train, X_eval, y_eval, steps=100):
     """Trains the Network nn using k-cross-validation"""
     evaluation = []
 
     for count, train_data in enumerate(X_train):
         for i in range(steps):
-            nn.train(train_data, y_train[count], batch_size=30, steps=100) #batch_size=int(len(train_data['dc'])/336), steps=1)
-            evaluation.append(nn.evaluate(X_eval[count], y_eval[count], batch_size=int(len(X_eval[count]['dc'])/336)))
+            nn.train(train_data, y_train[count], batch_size=1000, steps=30) #batch_size=int(len(train_data['dc'])/336), steps=1)
+            evaluation.append(nn.evaluate(X_eval[count], y_eval[count]))
             print("Training %s: %s/%s" % (count, (i+1), steps))
 
     return evaluation
diff --git a/pywatts/neural.py b/pywatts/neural.py
index bed842e..1304b8f 100644
--- a/pywatts/neural.py
+++ b/pywatts/neural.py
@@ -19,6 +19,9 @@ def pywatts_input_fn(X, y=None, num_epochs=None, shuffle=True, batch_size=1):
     else:
         dataset = tf.data.Dataset.from_tensor_slices((dict(features), labels))
 
+    if num_epochs is not None:
+        return dataset.batch(len(features['0']))
+
     if shuffle:
         return dataset.shuffle(len(features['0']*len(features)*4)).repeat().batch(batch_size)
     else:
@@ -31,7 +34,7 @@ class Net:
 
     def __init__(self, feature_cols=__feature_cols):
         self.__regressor = tf.estimator.DNNRegressor(feature_columns=feature_cols,
-                                                     hidden_units=[64, 128, 64],
+                                                     hidden_units=[128, 512, 128],
                                                      model_dir='tf_pywatts_model')
 
     def train(self, training_data, training_results, batch_size, steps):
diff --git a/pywatts/routines.py b/pywatts/routines.py
index e676ba0..bf3919f 100644
--- a/pywatts/routines.py
+++ b/pywatts/routines.py
@@ -66,10 +66,12 @@ def train(nn, X_train, y_train, X_val, y_val, steps=100):
 
 
 def plot_training(evaluation):
     loss = []
+    steps = []
     for e in evaluation:
-        loss.append(e['average_loss'])
+        loss.append(e['loss'])
+        steps.append(e['global_step'])
 
-    pp.plot(loss)
+    pp.plot(steps, loss)
     # Needed for execution in PyCharm
     pp.show()
diff --git a/pywatts/test_kcross_train.py b/pywatts/test_kcross_train.py
index 8a2b848..d67db36 100644
--- a/pywatts/test_kcross_train.py
+++ b/pywatts/test_kcross_train.py
@@ -8,7 +8,7 @@ K = 10
 NUM_EVAL_STATIONS = 40
 TRAIN = True
 PLOT = True
-TRAIN_STEPS = 20
+TRAIN_STEPS = 10
 
 
 df = pywatts.db.rows_to_df(list(range(1, NUM_STATIONS_FROM_DB)))
diff --git a/pywatts/test_predict24.py b/pywatts/test_predict24.py
index 42700af..c931f3a 100644
--- a/pywatts/test_predict24.py
+++ b/pywatts/test_predict24.py
@@ -6,7 +6,7 @@
 import matplotlib.pyplot as pp
 
 PREDICT_QUERY = "query-sample_24hour.json"
 PREDICT_RESULT = PREDICT_QUERY.replace("query", "result")
-QUERY_ID = 4
+QUERY_ID = 0
 
 pred_query = input_query("../sample_data/" + PREDICT_QUERY, QUERY_ID)
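
Note on the pywatts/neural.py change: the new early return in pywatts_input_fn means that whenever num_epochs is given, the input function emits the whole feature set as a single batch and skips the shuffle/repeat/mini-batch pipeline used for training. This lines up with the pywatts/kcross.py change, where nn.evaluate(...) no longer passes a batch_size. Below is a minimal sketch of that branching, with simplified, hypothetical names (the real pywatts_input_fn builds the feature dict from DataFrame columns and also handles the y=None case); it is an illustration of the control flow, not the project's actual code.

import tensorflow as tf

def input_fn_sketch(features, labels, num_epochs=None, shuffle=True, batch_size=1):
    # Build a dataset of (feature dict, label) pairs, as in pywatts_input_fn.
    dataset = tf.data.Dataset.from_tensor_slices((dict(features), labels))

    # Evaluation/prediction path (num_epochs given): one batch with every sample.
    if num_epochs is not None:
        return dataset.batch(len(features['0']))

    # Training path: shuffle, repeat indefinitely, and emit mini-batches.
    if shuffle:
        return dataset.shuffle(buffer_size=len(features['0'])).repeat().batch(batch_size)
    return dataset.repeat().batch(batch_size)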