Fix k-fold cross-validation (kcross): use inner loop index `j` (not `i`) when slicing sample buckets in split()
This commit is contained in:
parent
2dfe5ef1b6
commit
288be08699
2 changed files with 23 additions and 15 deletions
|
@ -6,7 +6,7 @@ from playhouse.sqlite_ext import SqliteExtDatabase
|
||||||
import os.path
|
import os.path
|
||||||
|
|
||||||
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
|
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
|
||||||
db_path = os.path.join(BASE_DIR, "../pywatts.db")
|
db_path = os.path.join(BASE_DIR, "pywatts.db")
|
||||||
print(db_path)
|
print(db_path)
|
||||||
db = SqliteExtDatabase(db_path)
|
db = SqliteExtDatabase(db_path)
|
||||||
|
|
||||||
|
|
|
@ -30,22 +30,32 @@ def split(data, k):
|
||||||
train_samples = []
|
train_samples = []
|
||||||
for j in range(k):
|
for j in range(k):
|
||||||
if j == i:
|
if j == i:
|
||||||
eval_samples.extend(samples[i*bucketsize:(i+1)*bucketsize])
|
eval_samples.extend(samples[j*bucketsize:(j+1)*bucketsize])
|
||||||
else:
|
else:
|
||||||
train_samples.extend(samples[i*bucketsize:(i+1)*bucketsize])
|
train_samples.extend(samples[j*bucketsize:(j+1)*bucketsize])
|
||||||
|
|
||||||
# Create new dictionaries in the eval lists
|
# Create new dictionaries in the eval lists
|
||||||
X_eval.append({'dc': eval_samples[:-1]})
|
#X_eval.append({'dc': eval_samples[:-1]})
|
||||||
y_eval.append({'dc': eval_samples[-1]})
|
#y_eval.append({'dc': eval_samples[-1]})
|
||||||
|
X_eval.append({'dc': [x for s in eval_samples for c, x in enumerate(s, 1) if c % 337 != 0]})
|
||||||
|
y_eval.append({'dc': [x for s in eval_samples for c, x in enumerate(s, 1) if c % 337 == 0]})
|
||||||
|
|
||||||
X_train.append({'dc': train_samples[:-1]})
|
#X_train.append({'dc': train_samples[:-1]})
|
||||||
y_train.append({'dc': train_samples[-1]})
|
#y_train.append({'dc': train_samples[-1]})
|
||||||
|
X_train.append({'dc': [x for s in train_samples for c, x in enumerate(s, 1) if c % 337 != 0]})
|
||||||
|
y_train.append({'dc': [x for s in train_samples for c, x in enumerate(s, 1) if c % 337 == 0]})
|
||||||
|
|
||||||
print(len(X_eval))
|
#print(len(X_eval))
|
||||||
print(len(y_eval))
|
#print(len(y_eval))
|
||||||
|
#print(len(X_train))
|
||||||
print(len(X_train))
|
#print(len(y_train))
|
||||||
print(len(y_train))
|
#print(len(X_train[0]['dc']))
|
||||||
|
#print(len(y_train[0]['dc']))
|
||||||
|
#print(len(X_eval[0]['dc']))
|
||||||
|
#print(len(y_eval[0]['dc']))
|
||||||
|
#print(X_train)
|
||||||
|
#print(y_train)
|
||||||
|
#exit(0)
|
||||||
|
|
||||||
return X_train, y_train, X_eval, y_eval
|
return X_train, y_train, X_eval, y_eval
|
||||||
|
|
||||||
|
@ -56,12 +66,10 @@ def train(nn, X_train, y_train, X_eval, y_eval, steps=10):
|
||||||
for count, train_data in enumerate(X_train):
|
for count, train_data in enumerate(X_train):
|
||||||
for i in range(steps):
|
for i in range(steps):
|
||||||
nn.train(train_data, y_train[count], batch_size=int(len(train_data['dc'])/336), steps=1)
|
nn.train(train_data, y_train[count], batch_size=int(len(train_data['dc'])/336), steps=1)
|
||||||
print(X_eval[count])
|
|
||||||
print(len(X_eval[count]['dc']))
|
|
||||||
print(y_eval[count])
|
|
||||||
evaluation.append(nn.evaluate(X_eval[count], y_eval[count], batch_size=int(len(X_eval[count]['dc'])/336)))
|
evaluation.append(nn.evaluate(X_eval[count], y_eval[count], batch_size=int(len(X_eval[count]['dc'])/336)))
|
||||||
print("Training %s: %s/%s" % (count, (i+1), steps))
|
print("Training %s: %s/%s" % (count, (i+1), steps))
|
||||||
|
|
||||||
|
return evaluation
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
Loading…
Reference in a new issue