# triton/python/autotune/pysrc/autotune.py
from __future__ import division
import argparse, itertools, os, sys, json
import misc_tools, optimize, dataset
import pyopencl as cl
import pyviennacl as vcl
import pyatidlas as atd
import numpy as np
from numpy import random
from model import train_model
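
# Each supported operation is mapped to its pyatidlas kernel template, a
# 'perf-index' lambda that turns [dtype size in bytes, problem sizes, elapsed time]
# into a throughput figure, and the unit in which that figure is reported.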
TYPES = { 'vector-axpy': {'template': atd.VectorAxpyTemplate,
                          'perf-index': lambda x: 2*x[0]*x[1][0]/x[2]*1e-9,
                          'perf-measure': 'GB/s'},
          'matrix-axpy': {'template': atd.MatrixAxpyTemplate,
                          'perf-index': lambda x: 2*x[0]*x[1][0]*x[1][1]/x[2]*1e-9,
                          'perf-measure': 'GB/s'},
          'reduction': {'template': atd.ReductionTemplate,
                        'perf-index': lambda x: 2*x[0]*x[1][0]/x[2]*1e-9,
                        'perf-measure': 'GB/s'},
          'row-wise-reduction': {'template': atd.RowWiseReductionTemplate,
                                 'perf-index': lambda x: x[0]*x[1][0]*x[1][1]/x[2]*1e-9,
                                 'perf-measure': 'GB/s'},
          'matrix-product': {'template': atd.MatrixProductTemplate,
                             'perf-index': lambda x: 2*x[1][0]*x[1][1]*x[1][2]/x[2]*1e-9,
                             'perf-measure': 'GFLOP/s'} }
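
# For example, a float32 vector-axpy over 1e6 elements measured at 1e-4 s scores
# 2 * 4 * 1e6 / 1e-4 * 1e-9 = 80 GB/s under the 'vector-axpy' perf-index above.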


def do_tuning(args):
    device = args['device']
    dname = misc_tools.sanitize_string(device.name)

    if os.path.isfile(args['json-file']):
        json_out = json.load(open(args['json-file'], 'r'))
    else:
        json_out = {}
    json_out["version"] = "1.0"

    def map_to_list(T, x):
        return list(map(T, x if isinstance(x, list) else [x]))

    if args['method'] == 'simple':
        default_tuning_sizes = {'vector-axpy': args['blas1-sizes'], 'reduction': args['blas1-sizes'],
                                'matrix-axpy': args['blas2-sizes'], 'row-wise-reduction': args['blas2-sizes'],
                                'matrix-product': args['blas3-sizes']}
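
    # Sweep every (operation, datatype) pair; an entry in args['exclude-operations']
    # matching either the operation name or 'operation-datatype' skips that combination.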
    for operation in ['vector-axpy', 'reduction', 'matrix-axpy', 'row-wise-reduction', 'matrix-product']:
        for datatype in [vcl.float32, vcl.float64]:
            if any(x in args['exclude-operations'] for x in [operation, operation + '-' + datatype.__name__]):
                continue
            ctx = cl.Context([device])
            ctx = vcl.backend.Context(ctx)
            #Check data-type
            if datatype is vcl.float64 and not device.double_fp_config:
                sys.stderr.write('Warning : The device ' + device.name + ' does not support double precision! Skipping ...')
                continue
            #Helper for execution
            def execute(device, node, other_params, sizes, fname=os.devnull, parameters=None):
                with vcl.Statement(node) as statement:
                    if parameters is not None:
                        TemplateType = TYPES[operation]['template']
                        return misc_tools.benchmark(TemplateType(TemplateType.Parameters(*parameters), *other_params), statement, device)
                    with open(fname, "w+") as archive:
                        return optimize.genetic(statement, device, TYPES[operation]['template'], lambda p: TYPES[operation]['template'](p, *other_params),
                                                lambda t: TYPES[operation]['perf-index']([datatype().itemsize, sizes, t]), TYPES[operation]['perf-measure'], archive)

            def log_uniform_sample(a, b):
                return np.exp(np.random.uniform(low=np.log(a), high=np.log(b), size=1)).astype(int)

            def log_space_gen_product(a, b, N, dim):
                N = int(N**(1.0/dim))
                def log_space_gen(a, b):
                    for i in range(N):
                        v = int(np.exp(np.log(a) + (np.log(b) - np.log(a))*(i+1)/N))
                        yield (v//64 + 1)*64
                return tuple(itertools.product(*[log_space_gen(a, b) for i in range(dim)]))
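
            # Note: log_space_gen_product above yields int(N**(1./dim)) sizes per dimension,
            # log-spaced between a and b and rounded up to a multiple of 64, and returns
            # their Cartesian product as the candidate problem sizes to benchmark.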
            #Helper for tuning
            def tune(execution_handler, a, b, dimsample, additional_parameters):
                print('-----')
                print(' '.join(map(str, ("Now tuning:", datatype.__name__, '-', operation, '-'.join(additional_parameters), '[' + device.name, '(' + device.platform.name + ')]'))))
                #Update JSON
                full_operation = operation + ''.join(additional_parameters)
                if full_operation not in json_out:
                    json_out[full_operation] = {}
                json_out[full_operation][datatype.__name__] = {}
                D = json_out[full_operation][datatype.__name__]
                if args['method'] == 'simple':
                    print default_tuning_sizes[operation]
                    profiles = [execution_handler(map(int, default_tuning_sizes[operation]))]
                else:
                    def compute_perf(x, t):
                        return TYPES[operation]['perf-index']([datatype().itemsize, x, t])
                    profiles_generator = log_space_gen_product(a, b, args['sample-size'], dimsample)
                    profiles = dataset.sample_profiles(execution_handler, profiles_generator)
                    if args['build-model']:
                        dataset_generator = log_space_gen_product(a, b, 1000, dimsample)
                        X, Y, profiles = dataset.sample_dataset(os.path.join(full_operation, datatype.__name__), profiles, execution_handler, dataset_generator)
                        clf = train_model(X, Y, profiles, TYPES[operation]['perf-measure'])
                        D['predictor'] = [{'children_left': e.tree_.children_left.tolist(),
                                           'children_right': e.tree_.children_right.tolist(),
                                           'threshold': e.tree_.threshold.astype('float64').tolist(),
                                           'feature': e.tree_.feature.astype('float64').tolist(),
                                           'value': e.tree_.value[:,:,0].astype('float64').tolist()} for e in clf.estimators_]
                if args['viennacl-src-path']:
                    misc_tools.update_viennacl_headers(args['viennacl-src-path'], device, datatype, operation, additional_parameters, profiles[0])
                D['profiles'] = [map(int, x) for x in profiles]
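
            # Each operation below defines an execution_handler that builds the
            # corresponding ViennaCL statement and forwards it to execute(), then
            # calls tune() with (min size, max size, number of size dimensions,
            # extra template parameters such as transposition layouts).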
            #Vector AXPY
            if operation=='vector-axpy':
                def execution_handler(sizes, fname=os.devnull, parameters=None):
                    x = vcl.Vector(sizes[0], context=ctx, dtype=datatype)
                    z = vcl.Vector(sizes[0], context=ctx, dtype=datatype)
                    return execute(device, vcl.Assign(z, x), (), sizes, fname, parameters)
                tune(execution_handler, 1e4, 1e7, 1, ())
            #Reduction
            if operation=='reduction':
                def execution_handler(sizes, fname=os.devnull, parameters=None):
                    x = vcl.Vector(sizes[0], context=ctx, dtype=datatype)
                    y = vcl.Vector(sizes[0], context=ctx, dtype=datatype)
                    s = vcl.Scalar(0, context=ctx, dtype=datatype)
                    return execute(device, vcl.Assign(s, vcl.Dot(x,y)), (), sizes, fname, parameters)
                tune(execution_handler, 1e4, 1e7, 1, ())
            #Matrix AXPY
            if operation=='matrix-axpy':
                def execution_handler(sizes, fname=os.devnull, parameters=None):
                    A = vcl.Matrix(sizes, context=ctx, dtype=datatype, layout=vcl.COL_MAJOR)
                    C = vcl.Matrix(sizes, context=ctx, dtype=datatype, layout=vcl.COL_MAJOR)
                    return execute(device, vcl.Assign(C, A), (), sizes, fname, parameters)
                tune(execution_handler, 100, 4000, 2, ())
            #Row-wise reduction
            if operation=='row-wise-reduction':
                for A_trans in args['gemv-layouts']:
                    def execution_handler(sizes, fname=os.devnull, parameters=None):
                        A = vcl.Matrix(sizes if A_trans=='N' else sizes[::-1], context=ctx, dtype=datatype, layout=vcl.COL_MAJOR)
                        x = vcl.Vector(sizes[1], context=ctx, dtype=datatype)
                        y = vcl.Vector(sizes[0], context=ctx, dtype=datatype)
                        LHS = A if A_trans=='N' else A.T
                        return execute(device, vcl.Assign(y, LHS*x), (), sizes, fname, parameters)
                    tune(execution_handler, 100, 4000, 2, (A_trans,))
            #Matrix Product
            if operation=='matrix-product':
                for L in args['gemm-layouts']:
                    A_trans = L[0]
                    B_trans = L[1]
                    def execution_handler(sizes, fname=os.devnull, parameters=None):
                        A = vcl.Matrix((sizes[0], sizes[2]) if A_trans=='N' else (sizes[2], sizes[0]), context=ctx, dtype=datatype, layout=vcl.COL_MAJOR)
                        B = vcl.Matrix((sizes[2], sizes[1]) if B_trans=='N' else (sizes[1], sizes[2]), context=ctx, dtype=datatype, layout=vcl.COL_MAJOR)
                        LHS = A if A_trans=='N' else A.T
                        RHS = B if B_trans=='N' else B.T
                        alpha = vcl.HostScalar(1.0, context=ctx, dtype=datatype)
                        beta = vcl.HostScalar(1.0, context=ctx, dtype=datatype)
                        C = vcl.Matrix((sizes[0], sizes[1]), context=ctx, dtype=datatype, layout=vcl.COL_MAJOR)
                        return execute(device, vcl.Assign(C, LHS*RHS*alpha + C*beta), (A_trans, B_trans), sizes, fname, parameters)
                    tune(execution_handler, 100, 2000, 3, (A_trans, B_trans))

    json.dump(json_out, open(args['json-file'], 'w'))
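
    # The resulting JSON has roughly this layout (illustrative; keys depend on
    # which operations, layouts and datatypes were actually tuned):
    #   { "version": "1.0",
    #     "matrix-productNT": { "float32": { "profiles": [[...], ...],
    #                                        "predictor": [...] } },
    #     ... }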


if __name__ == "__main__":
    devices = [d for platform in cl.get_platforms() for d in platform.get_devices()]
    print("----------------")
    print("Devices available:")
    print("----------------")
    for (i, d) in enumerate(devices):
        print 'Device', i, '|', cl.device_type.to_string(d.type), '|', d.name, 'on', d.platform.name
    print("----------------")
    args = {}
    def add_input(help, default):
        return raw_input(help + "[" + default + "] : ") or default
    args['device'] = devices[int(add_input('Device to tune for', '0'))]
    args['exclude-operations'] = add_input('Operations to exclude', 'vector-axpy,matrix-axpy,reduction,row-wise-reduction,matrix-product-float64').split(',')
    if not 'matrix-product' in args['exclude-operations']:
        args['gemm-layouts'] = add_input('GEMM Layouts', 'NN,NT,TN,TT').split(',')
    if not 'row-wise-reduction' in args['exclude-operations']:
        args['gemv-layouts'] = add_input('GEMV Layouts', 'N,T').split(',')
    args['json-file'] = add_input('JSON File', misc_tools.sanitize_string(args['device'].name) + '.json')
    args['method'] = add_input('Tuning type', 'simple')
    if args['method'] == 'simple':
        args['blas1-sizes'] = [int(float(add_input('BLAS1 size', '10e6')))]
        args['blas2-sizes'] = map(int, add_input('BLAS2 sizes (M,N)', '2560,2560').split(','))
        args['blas3-sizes'] = map(int, add_input('BLAS3 sizes (M,N,K)', '1024,1024,1024').split(','))
    args['build-model'] = True
    args['sample-size'] = 30
    args['viennacl-src-path'] = ''
    print("------")
    print("Auto-tuning")
    print("------")
    do_tuning(args)
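
# Usage note: this script targets Python 2 (raw_input, print statements) and needs
# pyopencl, pyviennacl and pyatidlas importable. Running `python autotune.py` lists
# the available OpenCL devices and then walks through the prompts above before
# calling do_tuning().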