Now everything is entirely handled through the command line

Philippe Tillet
2014-10-27 22:22:07 -04:00
parent 740ba08c5c
commit 1bc7d50258
2 changed files with 109 additions and 134 deletions


@@ -1,23 +0,0 @@
-#~ viennacl-src-root = /home/philippe/Development/viennacl-dev/viennacl/
-[vector-axpy]
-precision = single
-#~ size = 5000000
-#~ [reduction]
-#~ precision = single, double
-#~ size = 5000000
-#~
-#~ [matrix-axpy]
-#~ precision = single, double
-#~ size = 2560, 2560
-#~
-#~ [row-wise-reduction]
-#~ precision = single, double
-#~ layout = N,T
-#~ size = 2560, 2560
-#~
-#~ [matrix-product]
-#~ precision = single, double
-#~ layout = NN,NT,TN,TT
-#~ size = 1536, 1536, 1536
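The options that lived in this deleted INI file now travel on the command line (see the argparse changes below): the viennacl-src-root key becomes the --viennacl-src-path flag, precisions and layouts are always swept, and the per-operation size entries collapse into a single --sizes list. A hypothetical invocation reproducing this configuration, assuming the script is launched as autotune.py (the file name is not shown in this view):

    python autotune.py tune --device 0 unique --sizes 5000000 2560 2560 1536 1536 1536 --viennacl-src-path /home/philippe/Development/viennacl-dev/viennacl/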


@@ -14,9 +14,6 @@ from dataset import generate_dataset
 from model import train_model
-DATATYPES = { 'single' : vcl.float32,
-              'double' : vcl.float64 }
 TYPES = { 'vector-axpy': {'template':atd.VectorAxpyTemplate,
                           'perf-index':lambda x: 3*x[0]*x[1][0]/x[2]*1e-9,
                           'perf-measure':'GB/s'},
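For reference, the vector-axpy perf-index above computes effective bandwidth: with x = [itemsize, sizes, time], it evaluates 3 * itemsize * N / t * 1e-9 GB/s, the factor 3 counting the three N-element arrays the kernel touches (x and y read, z written). For example, in single precision with N = 5000000 and t = 1 ms: 3 * 4 * 5e6 / 1e-3 * 1e-9 = 60 GB/s.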
@@ -38,125 +35,119 @@ TYPES = { 'vector-axpy': {'template':atd.VectorAxpyTemplate,
                                       'perf-measure': 'GFLOP/s'} }

-def do_tuning(config_fname, viennacl_root, device):
+def do_tuning(args, devices):
     json_out = {}
-    config = ConfigObj(config_fname)
+    device = devices[args.device]

     def map_to_list(T, x):
         return list(map(T, x if isinstance(x, list) else [x]))

+    if(args.method=='unique'):
+        default_tuning_sizes = {'vector-axpy': tuple(args.sizes[:1]), 'reduction': tuple(args.sizes[:1]),
+                                'matrix-axpy' : tuple(args.sizes[1:3]), 'row-wise-reduction' : tuple(args.sizes[1:3]),
+                                'matrix-product': tuple(args.sizes[3:])}
+
     for operation in ['vector-axpy', 'matrix-axpy', 'reduction', 'row-wise-reduction', 'matrix-product']:
-        if operation in config:
-            p = config[operation]
-            precisions = map_to_list(str, p['precision'])
-            if 'all' in precisions:
-                precisions = ['single','double']
-            datatypes = [DATATYPES[k] for k in precisions]
         #Iterate through the datatypes
-        for datatype in datatypes:
+        for datatype in [vcl.float32, vcl.float64]:
             ctx = cl.Context([device])
             ctx = vcl.backend.Context(ctx)

             #Check data-type
             if datatype is vcl.float64 and not device.double_fp_config:
                 sys.stderr.write('Warning : The device ' + device.name + ' does not support double precision! Skipping ...')
                 continue

             #Helper for execution
             def execute(device, node, other_params, sizes, fname = os.devnull, parameters = None):
                 with vcl.Statement(node) as statement:
                     if parameters:
                         TemplateType = TYPES[operation]['template']
                         return misc_tools.benchmark(TemplateType(TemplateType.Parameters(*parameters),*other_params), statement, device)
                     print('-----')
                     print(' '.join(map(str, ("Now tuning:", datatype.__name__, '-', operation, '-'.join(other_params), '[' + device.name, '(' + device.platform.name + ')] for sizes', sizes))))
                     with open(fname, "w+") as archive:
                         return optimize.genetic(statement, device, TYPES[operation]['template'], lambda p: TYPES[operation]['template'](p, *other_params),
                                                 lambda t: TYPES[operation]['perf-index']([datatype().itemsize, sizes, t]), TYPES[operation]['perf-measure'], archive)

             #Helper for tuning
             def tune(execution_handler, nTuning, nDataPoints, draw, additional_parameters):
-                if 'size' in p:
-                    profile = execution_handler(map_to_list(int, p['size']))
-                    if 'viennacl-src-root' in config:
-                        misc_tools.update_viennacl_headers(config['viennacl-src-root'],device,datatype,operation,additional_parameters,profile)
+                #Update JSON
+                full_operation = operation + ''.join(additional_parameters)
+                if full_operation not in json_out:
+                    json_out[full_operation] = {}
+                json_out[full_operation][datatype.__name__] = {}
+                D = json_out[full_operation][datatype.__name__]
+
+                if args.method == 'unique':
+                    profiles = [execution_handler(map(int,default_tuning_sizes[operation]))]
+                    if args.viennacl_src_path:
+                        misc_tools.update_viennacl_headers(args.viennacl_src_path,device,datatype,operation,additional_parameters,profiles[0])
                 else:
                     def compute_perf(x, t):
                         return TYPES[operation]['perf-index']([datatype().itemsize, x, t])
                     X, Y, profiles = generate_dataset(TYPES[operation]['template'], execution_handler, nTuning, nDataPoints, draw)
                     clf = train_model(X, Y, profiles, TYPES[operation]['perf-measure'])
-
-                    #Update JSON
-                    full_operation = operation + ''.join(additional_parameters)
-                    if full_operation not in json_out:
-                        json_out[full_operation] = {}
-                    json_out[full_operation][datatype.__name__] = {}
-                    D = json_out[full_operation][datatype.__name__]
-                    D['profiles'] = [ prof.astype('int').tolist() for prof in profiles]
                     D['predictor'] = [{'children_left': e.tree_.children_left.tolist(),
                                        'children_right': e.tree_.children_right.tolist(),
                                        'threshold': e.tree_.threshold.astype('float32').tolist(),
                                        'feature': e.tree_.feature.astype('float32').tolist(),
                                        'value': e.tree_.value[:,:,0].astype('float32').tolist()} for e in clf.estimators_]
+                D['profiles'] = [ prof.astype('int').tolist() for prof in profiles]

             #Vector AXPY
             if operation=='vector-axpy':
                 def execution_handler(sizes, fname=os.devnull, parameters=None):
                     x = vcl.Vector(sizes[0], context=ctx, dtype=datatype)
                     y = vcl.Vector(sizes[0], context=ctx, dtype=datatype)
                     z = vcl.Vector(sizes[0], context=ctx, dtype=datatype)
                     return execute(device, vcl.Assign(z, vcl.ElementProd(vcl.exp(x + y),vcl.cos(x + y))), (), sizes, fname, parameters)
                 tune(execution_handler, 30, 1000, lambda : 64*np.random.randint(low=10, high=100000, size=1), ())

             #Reduction
             if operation=='reduction':
                 def execution_handler(sizes, fname=os.devnull, parameters=None):
                     x = vcl.Vector(sizes[0], context=ctx, dtype=datatype)
                     y = vcl.Vector(sizes[0], context=ctx, dtype=datatype)
                     s = vcl.Scalar(0, context=ctx, dtype=datatype)
                     return execute(device, vcl.Assign(s, vcl.Dot(x,y)), (), sizes, fname, parameters)
-                tune(execution_handler, 50, 1000, lambda : 64*np.random.randint(low=10, high=100000, size=1), ())
+                tune(execution_handler, 30, 1000, lambda : 64*np.random.randint(low=10, high=100000, size=1), ())

             #Matrix AXPY
             if operation=='matrix-axpy':
                 def execution_handler(sizes, fname=os.devnull, parameters=None):
                     A = vcl.Matrix(sizes, context=ctx, dtype=datatype)
                     B = vcl.Matrix(sizes, context=ctx, dtype=datatype)
                     C = vcl.Matrix(sizes, context=ctx, dtype=datatype)
                     return execute(device, vcl.Assign(C,A+B), (), sizes, fname, parameters)
-                tune(execution_handler, 50, 1000, lambda : 64*np.random.randint(low=5, high=100, size=2), ())
+                tune(execution_handler, 30, 1000, lambda : 64*np.random.randint(low=5, high=100, size=2), ())

             #Row-wise reduction
             if operation=='row-wise-reduction':
-                layouts = map_to_list(str,p['layout'])
-                if 'all' in layouts:
-                    layouts = ['N', 'T']
+                layouts = ['N', 'T']
                 for A_trans in layouts:
                     def execution_handler(sizes, fname=os.devnull, parameters=None):
                         A = vcl.Matrix(sizes if A_trans=='N' else sizes[::-1], context=ctx, dtype=datatype, layout=vcl.COL_MAJOR)
                         x = vcl.Vector(sizes[1] if A_trans=='N' else sizes[0], context=ctx, dtype=datatype)
                         y = vcl.Vector(sizes[0] if A_trans=='N' else sizes[1], context=ctx, dtype=datatype)
                         LHS = A if A_trans=='N' else A.T
                         return execute(device, vcl.Assign(y, LHS*x), (), sizes, fname, parameters)
-                    tune(execution_handler, 50, 1000, lambda : 64*np.random.randint(low=5, high=100, size=2), (A_trans,))
+                    tune(execution_handler, 30, 1000, lambda : 64*np.random.randint(low=5, high=100, size=2), (A_trans,))

             #Matrix Product
             if operation=='matrix-product':
-                layouts = map_to_list(str,p['layout'])
-                if 'all' in layouts:
-                    layouts = ['NN', 'NT', 'TN', 'TT']
+                layouts = ['NN', 'NT', 'TN', 'TT']
                 for layout in layouts:
                     def execution_handler(sizes, fname=os.devnull, parameters=None):
                         A_trans = layout[0]
                         B_trans = layout[1]
                         A = vcl.Matrix((sizes[0], sizes[1]) if A_trans=='N' else (sizes[1],sizes[0]), context=ctx, dtype=datatype, layout=vcl.COL_MAJOR);
                         B = vcl.Matrix((sizes[1], sizes[2]) if B_trans=='N' else (sizes[2],sizes[1]), context=ctx, dtype=datatype, layout=vcl.COL_MAJOR);
                         LHS = A if A_trans=='N' else A.T
                         RHS = B if B_trans=='N' else B.T
                         alpha = vcl.HostScalar(1.0, context=ctx, dtype = datatype)
                         beta = vcl.HostScalar(1.0, context=ctx, dtype = datatype)
                         C = vcl.Matrix((sizes[0], sizes[2]), context=ctx, dtype = datatype, layout=vcl.COL_MAJOR)
                         return execute(device, vcl.Assign(C,LHS*RHS*alpha + C*beta),(A_trans, B_trans), sizes, fname, parameters)
-                    tune(execution_handler, 50, 2000, lambda : 64*np.random.randint(low=1, high=40, size=3),(layout[0], layout[1]))
+                    tune(execution_handler, 30, 1000, lambda : 64*np.random.randint(low=1, high=40, size=3),(layout[0], layout[1]))

     dname = misc_tools.sanitize_string(device.name)
     json_out["version"] = "1.0"
@@ -169,9 +160,16 @@ if __name__ == "__main__":
     subparsers = parser.add_subparsers(dest='action')

     print_devices_parser = subparsers.add_parser('list-devices', help='list the devices available')
     tune_parser = subparsers.add_parser('tune', help='tune using a specific configuration file')
-    tune_parser.add_argument("--config", default="config.ini", required=False, type=str)
     tune_parser.add_argument("--device", default=0, required=False, type=str)
-    tune_parser.add_argument("--viennacl-root", default='', required=False, type=str)
+    tune_subparsers = tune_parser.add_subparsers(dest='method')
+    big_sizes_parser = tune_subparsers.add_parser('unique', help = 'Tune each operation for unique sizes')
+    big_sizes_parser.add_argument("--sizes", nargs='+', default=[10e6,2560,2560,1536,1536,1536], required=False, type=int, help = '6 = 1 + 2 + 3 sizes for respectively BLAS1, BLAS2, BLAS3')
+    big_sizes_parser.add_argument("--viennacl-src-path", default='', required=False, type=str)
+    model_parser = tune_subparsers.add_parser('build-model', help = 'Build an input-dependent model')

     args = parser.parse_args()
     devices = [d for platform in cl.get_platforms() for d in platform.get_devices()]
@@ -186,4 +184,4 @@ if __name__ == "__main__":
print("------") print("------")
print("Auto-tuning") print("Auto-tuning")
print("------") print("------")
do_tuning(args.config, args.viennacl_root, devices[args.device]) do_tuning(args, devices)
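For reference, a minimal runnable sketch of the resulting command-line interface (the script name autotune.py and the int type for --device are assumptions; the real file name is not shown in this view), demonstrating how the parsed arguments split into the per-BLAS-level sizes consumed by do_tuning:

import argparse

parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest='action')
subparsers.add_parser('list-devices', help='list the devices available')
tune = subparsers.add_parser('tune', help='tune the kernels on one device')
tune.add_argument('--device', default=0, required=False, type=int)
methods = tune.add_subparsers(dest='method')
unique = methods.add_parser('unique', help='Tune each operation for unique sizes')
unique.add_argument('--sizes', nargs='+', default=[10000000,2560,2560,1536,1536,1536], required=False, type=int)
unique.add_argument('--viennacl-src-path', default='', required=False, type=str)
methods.add_parser('build-model', help='Build an input-dependent model')

#Equivalent of: python autotune.py tune --device 0 unique
args = parser.parse_args(['tune', '--device', '0', 'unique'])
#BLAS1 gets sizes[:1], BLAS2 gets sizes[1:3], BLAS3 gets sizes[3:]
print(args.action, args.method, args.sizes[:1], args.sizes[1:3], args.sizes[3:])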