More robust dataset
@@ -101,7 +101,7 @@ def do_tuning(config_fname, spec_fname, viennacl_root):
     if 'all' in layouts:
         layouts = ['NN', 'NT', 'TN', 'TT']
     for layout in layouts:
-        def execution_handler(sizes, fname, parameters=None):
+        def execution_handler(sizes, fname=os.devnull, parameters=None):
             A_trans = layout[0]
             B_trans = layout[1]
             A = vcl.Matrix((sizes[0], sizes[1]) if A_trans=='N' else (sizes[1],sizes[0]), context=ctx, dtype=datatype, layout=vcl.COL_MAJOR);
@@ -116,7 +116,7 @@ def do_tuning(config_fname, spec_fname, viennacl_root):
                 TemplateType = TYPES[operation]['template']
                 return tools.benchmark(TemplateType(TemplateType.Parameters(*parameters),A_trans,B_trans), statement, device)
             else:
-                execute(statement,(A_trans, B_trans), sizes, fname)
+                return execute(statement,(A_trans, B_trans), sizes, fname)
         X, Y, profiles = generate_dataset(TYPES[operation]['template'], execution_handler)
         train_model(X, Y, profiles)
 
@@ -3,7 +3,7 @@ import sys
 import re
 import random
 import numpy as np
-from sklearn.neighbors.kde import KernelDensity;
+from sklearn.neighbors.kde import KernelDensity
 from pyviennacl.atidlas import FetchingPolicy
 
 def decode(y):
@@ -12,91 +12,83 @@ def decode(y):
     y[8] = fetch[y[8]]
     return y
 
+def resample(X, tbincount, densities, step):
+    Xtuples = [tuple(x) for x in X]
+    r = random.random()
+    while(True):
+        if(len(tbincount)==0 or len(densities)==0 or r<=1.0/len(densities)):
+            x = np.array([step*random.randint(1,40), step*random.randint(1,40), step*random.randint(1,40)]);
+        else:
+            probs = [1.0/x if x>0 else 0 for x in tbincount]
+            distr = np.random.choice(range(tbincount.size), p = probs/np.sum(probs))
+            x = densities[distr].sample()[0]
+        x = np.maximum(np.ones(x.shape),(x - step/2).astype(int)/step + 1)*step
+        if tuple(x) not in Xtuples:
+            break
+    return x.astype(int)
 
 def generate_dataset(TemplateType, execution_handler):
-    I = 2
-    step = 64;
-    max_size = 4000;
+    I = 0
+    step = 64
+    max_size = 4000
 
-    #Retrieves the existing data
-    print "Retrieving data..."
     path = "./data"
-    files = os.listdir(path)
-    X = np.empty((len(files),3))
-    t = np.empty(len(files))
-    profiles = []
-    nonemptyfiles = []
-    for i,fname in enumerate(files):
-        if os.path.getsize(os.path.join(path,fname))>0:
-            nonemptyfiles.append(fname)
-    files = nonemptyfiles
 
-    for i,fname in enumerate(files):
-        MNK = re.search(r"([0-9]+)-([0-9]+)-([0-9]+).csv", fname)
-        fl = open(os.path.join(path,fname),"rb")
-        A = np.loadtxt(fl,delimiter=',')
-        x = np.array([MNK.group(1), MNK.group(2), MNK.group(3)]).astype(float)
-        y = tuple(A[np.argmin(A[:,0]),1:])
-        if y not in profiles:
-            profiles.append(y)
-        idx = profiles.index(y)
-        X[i,:] = x
-        t[i] = idx
+    #Tries to resume
+    try:
+        X = np.loadtxt(open(os.path.join(path, "X.csv"),"rb"))
+        t = np.loadtxt(open(os.path.join(path, "t.csv"),"rb"))
+        profiles = np.loadtxt(open(os.path.join(path, "profiles.csv"),"rb")).tolist()
+        if not isinstance(profiles[0], list):
+            profiles = [profiles]
+        N = t.size
+        X.resize((N+I, 3), refcheck=False)
+        t.resize(N+I, refcheck=False)
+        print 'Resuming dataset generation...'
+    except:
+        X = np.empty((I,I))
+        t = np.empty(I)
+        profiles = []
+        N = 0
+        pass
 
 
     #Generates new data
-    print "Generating new data..."
-    kdes = [KernelDensity(kernel='gaussian', bandwidth=2*step).fit(X[t==i,:]) for i in range(int(max(t))+1)] if files else [];
-    X.resize((len(files)+I, 3), refcheck=False);
-    t.resize(len(files)+I, refcheck=False);
+    print "Getting some good profiles..."
+    densities = [KernelDensity(kernel='gaussian', bandwidth=2*step).fit(X[t==i,:]) for i in range(int(max(t))+1)] if N else [];
+    X.resize((N+I, 3), refcheck=False)
+    t.resize(N+I, refcheck=False)
 
-    max_square = max_size/step
     for i in range(I):
-        n_per_label = np.bincount(t[0:i+1].astype(int));
-        Xtuples = [tuple(x) for x in X];
-        r = random.random();
-        while(True):
-            if(len(kdes)==0 or r<=1.0/len(kdes)):
-                x = np.array([step*random.randint(1,40), step*random.randint(1,40), step*random.randint(1,40)]);
-            else:
-                probs = (1.0/n_per_label)
-                distr = np.random.choice(range(n_per_label.size), p = probs/np.sum(probs))
-                x = kdes[distr].sample()[0]
-            x = np.maximum(np.ones(x.shape),(x - step/2).astype(int)/step + 1)*step
-            if tuple(x) not in Xtuples:
-                break;
-        x = x.astype(int)
-        x = [1536,1536,1536]
-        fname = os.path.join(path, `x[0]` +"-"+ `x[1]` +"-"+ `x[2]` +".csv")
-        #Execute auto-tuning procedure
-        execution_handler(x, fname)
-        #Load csv into matrix
-        fl = open(fname,"rb");
-        A = np.loadtxt(fl,delimiter=',');
-        #Update the kernel density estimators
-        y = tuple(A[np.argmin(A[:,0]),1:]);
+        tbincount = np.bincount(t[0:i+1].astype(int))
+        x = resample(X, tbincount, densities, step)
+        y = execution_handler(x)
         if y not in profiles:
-            profiles.append(y);
-            kdes.append(KernelDensity(kernel='gaussian', bandwidth=2*step));
-        idx = profiles.index(y);
-        #Update data
-        X[len(files)+i,:] = x;
-        t[len(files)+i] = idx;
-        #Update density estimator p(M,N,K | t=idx)
-        kdes[idx].fit(X[t[0:len(files)+i+1]==idx,:]);
+            profiles.append(y)
+            densities.append(KernelDensity(kernel='gaussian', bandwidth=2*step))
+        idx = profiles.index(y)
+        X[N+i,:] = x
+        t[N+i] = idx
+        densities[idx].fit(X[t[0:N+i+1]==idx,:])
+        np.savetxt(os.path.join(path,"X.csv"), X)
+        np.savetxt(os.path.join(path,"t.csv"), t)
+        np.savetxt(os.path.join(path,"profiles.csv"), profiles)
 
-    print "Exporting data...";
-    #Shuffle the list of file
-    files = os.listdir(path)
-    X = np.empty((len(files),3))
-    Y = np.zeros((len(files), len(profiles)))
-    for i,fname in enumerate(files):
-        MNK = re.search(r"([0-9]+)-([0-9]+)-([0-9]+).csv", fname)
-        X[i,:] = map(float,[MNK.group(k) for k in range(1,4)])
-        fl = open(os.path.join(path,fname),"rb");
-        A = np.loadtxt(fl,delimiter=',')
+    print "Generating the dataset..."
+    N = 500
+    Y = np.empty((N, len(profiles)))
+    X = np.empty((N,3))
+    t = []
+    for i in range(N):
+        x = resample(X, np.bincount(t), densities, step)
         for j,y in enumerate(profiles):
-            idx = np.where(np.all(A[:,1:]==y,axis=1))[0]
-            T = A[idx[0], 0] if idx.size else execution_handler(map(int,X[i,:]), '', decode(map(int, y)))
-            Y[i,j] = 2*1e-9*X[i,0]*X[i,1]*X[i,2]/T
+            T = execution_handler(x, os.devnull, decode(map(int, y)))
+            Y[i,j] = 2*1e-9*x[0]*x[1]*x[2]/T
+        idx = np.argmax(Y[i,:])
+        X[i,:] = x
+        t = np.argmax(Y[:i+1,], axis=1)
+        densities[idx].fit(X[t==idx,:])
 
+    np.savetxt(os.path.join(path,"Y.csv"), Y)
 
 
     return X, Y, profiles
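The new resample helper above is the heart of the commit: it draws (M, N, K) size triples either uniformly on a coarse grid, or from the kernel-density estimate of a profile chosen inversely proportionally to how often that profile has won so far, so under-represented profiles get explored more. A minimal standalone sketch of that scheme, in Python 3 with the current scikit-learn import path (the rng parameter is an addition of this sketch for reproducibility, not part of the commit):

    import numpy as np
    from sklearn.neighbors import KernelDensity  # sklearn.neighbors.kde in old releases

    def resample(X, tbincount, densities, step, rng=np.random):
        # Draw a new (M, N, K) size triple that is not already present in X.
        seen = {tuple(x) for x in X}
        r = rng.random_sample()
        while True:
            if len(tbincount) == 0 or len(densities) == 0 or r <= 1.0 / len(densities):
                # Uniform draw on the grid {step, 2*step, ..., 40*step}^3.
                x = step * np.array([rng.randint(1, 41) for _ in range(3)], dtype=float)
            else:
                # Pick a label inversely proportionally to its frequency, then
                # sample near the sizes where that profile is known to win.
                probs = np.array([1.0 / c if c > 0 else 0.0 for c in tbincount])
                label = rng.choice(len(tbincount), p=probs / probs.sum())
                x = densities[label].sample()[0]
            # Snap the (possibly continuous) sample back onto the size grid;
            # on grid points this is a no-op, so both branches can share it.
            x = np.maximum(np.ones_like(x), (x - step / 2) // step + 1) * step
            if tuple(x) not in seen:
                return x.astype(int)

Duplicates are simply rejected and redrawn, which is what the while(True) loop in the hunk does.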
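The try block at the top of generate_dataset resumes from X.csv, t.csv and profiles.csv. The isinstance guard is needed because np.loadtxt collapses a file holding a single row into a 1-D array; a small illustration of that corner case (the file name is only for the demo):

    import numpy as np

    np.savetxt("profiles.csv", [(1, 8, 16)])   # a single profile -> one row
    p = np.loadtxt("profiles.csv").tolist()    # -> [1.0, 8.0, 16.0], a flat list
    if not isinstance(p[0], list):             # hence the guard in the hunk:
        p = [p]                                # re-wrap into a list of profiles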
@@ -181,4 +181,4 @@ class GeneticOperators(object)
             sys.stdout.write('Time %d | Best %d %s [ for %s ]\r'%(time.time() - start_time, best_performance, perf_metric, best_profile))
             sys.stdout.flush()
         sys.stdout.write('\n')
-        return population
+        return self.decode(hof[0])
@@ -9,8 +9,6 @@ def train_model(X, Y, profiles):
     X = scaler.transform(X);
     ref = np.argmax(np.bincount(np.argmax(Y, axis=1))) #most common profile
 
-    print Y
-    print np.bincount(np.argmax(Y, axis=1))
     #Cross-validation data-sets
     cut = int(0.5*X.shape[0]+1);
     XTr = X[0:cut, :];
@@ -50,4 +50,4 @@ from genetic import GeneticOperators
 
 def genetic(statement, context, TemplateType, build_template, parameter_names, compute_perf, perf_metric, out):
     GA = GeneticOperators(context.devices[0], statement, parameter_names, TemplateType, build_template, out)
-    GA.optimize(maxtime='2m30s', maxgen=1000, compute_perf=compute_perf, perf_metric=perf_metric)
+    return GA.optimize(maxtime='2m30s', maxgen=1000, compute_perf=compute_perf, perf_metric=perf_metric)
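As an aside, the labels stored in Y by the dataset hunk are GEMM throughputs: multiplying an MxK by a KxN matrix costs 2*M*N*K flops (one multiply and one add per inner-product term), which is where the expression 2*1e-9*x[0]*x[1]*x[2]/T comes from. A worked example:

    # A 1536 x 1536 x 1536 GEMM completing in 10 ms:
    M = N = K = 1536
    T = 1e-2                          # seconds
    print(2 * 1e-9 * M * N * K / T)   # ~724.8 GFLOP/s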