Packaging: polished

This commit is contained in:
Philippe Tillet
2015-04-30 00:46:42 -04:00
parent 5ef01f041a
commit 006d0f13de
4061 changed files with 20266 additions and 559 deletions

View File

@@ -1,6 +1,17 @@
# Python bindings packaging: builds the `isaac` python package via distutils
# and installs it with `setup.py install` at install time.
find_program(PYTHON "python")

# NOTE: GLOB_RECURSE only sees files present at configure time; kept for
# compatibility with the existing layout.
file(GLOB_RECURSE PYTHON_SRC *.cpp)
file(GLOB_RECURSE PYTHON_PYSRC *.py)
# Dummy target so the python sources show up in IDE project views.
add_custom_target(MAKE_PYTHON_SRC_VISIBLE SOURCES ${PYTHON_SRC} ${PYTHON_PYSRC})

if(PYTHON)
  add_subdirectory(pyisaac)
  add_subdirectory(autotune)

  # BUGFIX: the setup.py configure/build/install logic below previously ran
  # unconditionally; with no python interpreter found, ${PYTHON} expands to
  # PYTHON-NOTFOUND and the custom command is broken. Guard everything.
  set(SETUP_PY_IN "${CMAKE_CURRENT_SOURCE_DIR}/setup.py")
  set(SETUP_PY "${CMAKE_CURRENT_BINARY_DIR}/setup.py")
  set(OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/build")
  # Substitute CMake variables into the build-tree copy of setup.py.
  configure_file(${SETUP_PY_IN} ${SETUP_PY})

  # `setup.py build` creates ${OUTPUT}; the timestamp file records the last
  # successful build so the target is not rerun needlessly.
  add_custom_command(OUTPUT ${OUTPUT}/timestamp
                     COMMAND ${CMAKE_COMMAND} -E copy_directory ${CMAKE_CURRENT_SOURCE_DIR}/isaac ${CMAKE_CURRENT_BINARY_DIR}/isaac
                     COMMAND ${PYTHON} ${SETUP_PY} build
                     COMMAND ${CMAKE_COMMAND} -E touch ${OUTPUT}/timestamp
                     DEPENDS isaac ${PYTHON_SRC} ${PYTHON_PYSRC} ${SETUP_PY}
                     VERBATIM)
  add_custom_target(python ALL DEPENDS ${OUTPUT}/timestamp)

  install(CODE "execute_process(WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} COMMAND ${PYTHON} ${SETUP_PY} install)")
endif()

View File

@@ -1,18 +0,0 @@
# Optional: freeze the auto-tuner into a standalone executable with PyInstaller.
find_program(PYINSTALLER pyinstaller)
if(PYINSTALLER)
  set(SPEC_IN "${CMAKE_CURRENT_SOURCE_DIR}/pyinstaller_build.spec")
  set(SPEC "${CMAKE_CURRENT_BINARY_DIR}/pyinstaller_build.spec")
  set(OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/build/timestamp")

  # Rebuild whenever any tuner source or the spec file changes.
  file(GLOB DEPS RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "${CMAKE_CURRENT_SOURCE_DIR}/pysrc/*.py")
  list(APPEND DEPS "${CMAKE_CURRENT_SOURCE_DIR}/pyinstaller_build.spec")

  # Substitute CMake variables into the build-tree copy of the spec.
  configure_file(${SPEC_IN} ${SPEC})

  add_custom_command(OUTPUT ${OUTPUT}
                     # BUGFIX: run the *configured* spec (${SPEC}); the previous
                     # command passed ${SPEC_IN}, discarding configure_file's
                     # output and making PyInstaller write into the source tree.
                     COMMAND ${PYINSTALLER} ${SPEC} ${CMAKE_CURRENT_SOURCE_DIR}
                     # Ensure the directory exists before touching the stamp.
                     COMMAND ${CMAKE_COMMAND} -E make_directory ${CMAKE_CURRENT_BINARY_DIR}/build
                     COMMAND ${CMAKE_COMMAND} -E touch ${OUTPUT}
                     DEPENDS ${DEPS} pyisaac
                     VERBATIM)
  add_custom_target(autotune ALL DEPENDS ${OUTPUT})
endif()

View File

@@ -1,74 +0,0 @@
Metadata-Version: 1.1
Name: pyopencl
Version: 2014.1
Summary: Python wrapper for OpenCL
Home-page: http://mathema.tician.de/software/pyopencl
Author: Andreas Kloeckner
Author-email: inform@tiker.net
License: MIT
Description: PyOpenCL lets you access GPUs and other massively parallel compute
devices from Python. It tries to offer computing goodness in the
spirit of its sister project `PyCUDA <http://mathema.tician.de/software/pycuda>`_:
* Object cleanup tied to lifetime of objects. This idiom, often
called
`RAII <http://en.wikipedia.org/wiki/Resource_Acquisition_Is_Initialization>`_
in C++, makes it much easier to write correct, leak- and
crash-free code.
* Completeness. PyOpenCL puts the full power of OpenCL's API at
your disposal, if you wish. Every obscure `get_info()` query and
all CL calls are accessible.
* Automatic Error Checking. All CL errors are automatically
translated into Python exceptions.
* Speed. PyOpenCL's base layer is written in C++, so all the niceties
above are virtually free.
* Helpful and complete `Documentation <http://documen.tician.de/pyopencl>`_
as well as a `Wiki <http://wiki.tiker.net/PyOpenCL>`_.
* Liberal license. PyOpenCL is open-source under the
`MIT license <http://en.wikipedia.org/wiki/MIT_License>`_
and free for commercial, academic, and private use.
* Broad support. PyOpenCL was tested and works with Apple's, AMD's, and Nvidia's
CL implementations.
To use PyOpenCL, you just need `numpy <http://numpy.org>`_ and an OpenCL
implementation.
(See this `howto <http://wiki.tiker.net/OpenCLHowTo>`_ for how to get one.)
Places on the web related to PyOpenCL:
* `Python package index <http://pypi.python.org/pypi/pyopencl>`_ (download releases)
.. image:: https://badge.fury.io/py/pyopencl.png
:target: http://pypi.python.org/pypi/pyopencl
* `C. Gohlke's Windows binaries <http://www.lfd.uci.edu/~gohlke/pythonlibs/#pyopencl>`_ (download Windows binaries)
* `Github <http://github.com/pyopencl/pyopencl>`_ (get latest source code, file bugs)
* `Documentation <http://documen.tician.de/pyopencl>`_ (read how things work)
* `Wiki <http://wiki.tiker.net/PyOpenCL>`_ (read installation tips, get examples, read FAQ)
Platform: UNKNOWN
Classifier: Environment :: Console
Classifier: Development Status :: 5 - Production/Stable
Classifier: Intended Audience :: Developers
Classifier: Intended Audience :: Other Audience
Classifier: Intended Audience :: Science/Research
Classifier: License :: OSI Approved :: MIT License
Classifier: Natural Language :: English
Classifier: Programming Language :: C++
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 2.4
Classifier: Programming Language :: Python :: 2.5
Classifier: Programming Language :: Python :: 2.6
Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.2
Classifier: Programming Language :: Python :: 3.3
Classifier: Topic :: Scientific/Engineering
Classifier: Topic :: Scientific/Engineering :: Mathematics
Classifier: Topic :: Scientific/Engineering :: Physics

File diff suppressed because it is too large Load Diff

View File

@@ -1,55 +0,0 @@
../pyopencl/_mymako.py
../pyopencl/array.py
../pyopencl/algorithm.py
../pyopencl/version.py
../pyopencl/cache.py
../pyopencl/clrandom.py
../pyopencl/reduction.py
../pyopencl/ipython.py
../pyopencl/_cluda.py
../pyopencl/__init__.py
../pyopencl/scan.py
../pyopencl/capture_call.py
../pyopencl/tools.py
../pyopencl/clmath.py
../pyopencl/elementwise.py
../pyopencl/characterize/performance.py
../pyopencl/characterize/__init__.py
../pyopencl/compyte/dtypes.py
../pyopencl/compyte/array.py
../pyopencl/compyte/__init__.py
../pyopencl/cl/pyopencl-ranluxcl.cl
../pyopencl/cl/pyopencl-airy.cl
../pyopencl/cl/pyopencl-eval-tbl.cl
../pyopencl/cl/pyopencl-bessel-y.cl
../pyopencl/cl/pyopencl-bessel-j.cl
../pyopencl/cl/pyopencl-complex.h
../pyopencl/_mymako.pyc
../pyopencl/array.pyc
../pyopencl/algorithm.pyc
../pyopencl/version.pyc
../pyopencl/cache.pyc
../pyopencl/clrandom.pyc
../pyopencl/reduction.pyc
../pyopencl/ipython.pyc
../pyopencl/_cluda.pyc
../pyopencl/__init__.pyc
../pyopencl/scan.pyc
../pyopencl/capture_call.pyc
../pyopencl/tools.pyc
../pyopencl/clmath.pyc
../pyopencl/elementwise.pyc
../pyopencl/characterize/performance.pyc
../pyopencl/characterize/__init__.pyc
../pyopencl/compyte/dtypes.pyc
../pyopencl/compyte/array.pyc
../pyopencl/compyte/__init__.pyc
../pyopencl/_cl.so
../pyopencl/_pvt_struct.so
./
dependency_links.txt
SOURCES.txt
top_level.txt
requires.txt
not-zip-safe
PKG-INFO

View File

@@ -1,3 +0,0 @@
pytools>=2014.2
pytest>=2
decorator>=3.2.0

View File

@@ -1,3 +0,0 @@
_cl
_pvt_struct
pyopencl

View File

@@ -1,32 +0,0 @@
#!/usr/bin/env
# PyInstaller spec: freezes the auto-tuner (pysrc/autotune.py) into a single
# console executable.  Analysis/PYZ/EXE are injected into this file's
# namespace by PyInstaller when it executes the spec; they are not imports.
# NOTE(review): the shebang names no interpreter ("env" alone); harmless since
# this file is only consumed via `pyinstaller <spec>`.
import os, sys
# The CMake custom command passes the source prefix as the second CLI argument.
prefix = sys.argv[2]
# NOTE(review): hard-coded developer build path -- presumably a local pyatidlas
# build; verify before reusing this spec on another machine.
sys.path.append('/home/philippe/Development/ATIDLAS/build/python/pyatidlas/build/lib.linux-x86_64-2.7/')
sys.path.append(os.path.join(prefix, 'pysrc'))
# hiddenimports: modules loaded dynamically at run time that PyInstaller's
# static analysis cannot discover on its own.
a = Analysis([os.path.join(prefix, 'pysrc','autotune.py')],
hiddenimports=['scipy.sparse.csgraph._validation',
'scipy.special._ufuncs_cxx',
'scipy.sparse.linalg.dsolve.umfpack',
'scipy.integrate.vode',
'scipy.integrate.lsoda',
'sklearn.utils.sparsetools._graph_validation',
'sklearn.utils.sparsetools._graph_tools',
'sklearn.utils.lgamma',
'sklearn.tree._utils'],
hookspath=None,
# Exclude heavyweight packages the tuner never uses at run time.
excludes=['scipy.io.matlab','matplotlib','PyQt4'],
runtime_hooks=None)
pyz = PYZ(a.pure)
# Single-file console executable named `autotune` (UPX-compressed when available).
exe = EXE(pyz,
a.scripts,
a.binaries,
a.zipfiles,
a.datas,
name='autotune',
debug=False,
strip=None,
upx=True,
console=True )

View File

@@ -1,235 +0,0 @@
from __future__ import division
import argparse, itertools, os, sys, json
import misc_tools, optimize, dataset
import pyisaac as atd
import numpy as np
from numpy import random
from model import train_model
# Registry of tunable operations.  For each BLAS-like operation:
#   'template'     -- the pyisaac kernel template class (for gemv/gemm, a dict
#                     keyed by transposition layout),
#   'perf-index'   -- maps [dtype size, problem sizes, elapsed time] to a
#                     throughput figure,
#   'perf-measure' -- the unit of that figure (GB/s for bandwidth-bound
#                     kernels, GFLOP/s for gemm).
TYPES = { 'vaxpy': {'template':atd.vaxpy,
'perf-index':lambda x: 3*x[0]*x[1][0]/x[2]*1e-9,
'perf-measure':'GB/s'},
'maxpy': {'template':atd.maxpy,
'perf-index':lambda x: 3*x[0]*x[1][0]*x[1][1]/x[2]*1e-9,
'perf-measure':'GB/s'},
'dot': {'template':atd.reduction,
'perf-index':lambda x: 2*x[0]*x[1][0]/x[2]*1e-9,
'perf-measure':'GB/s'},
'gemv': {'template': {'N': atd.mreduction_rows, 'T': atd.mreduction_cols},
'perf-index':lambda x: x[0]*x[1][0]*x[1][1]/x[2]*1e-9,
'perf-measure':'GB/s'},
'gemm': {'template': {('N','N'): atd.mproduct_nn, ('T','N'): atd.mproduct_tn,
('N','T'): atd.mproduct_nt, ('T','T'): atd.mproduct_tt},
'perf-index': lambda x: 2*x[1][0]*x[1][1]*x[1][2]/x[2]*1e-9,
'perf-measure': 'GFLOP/s'} }
def do_tuning(args):
"""Auto-tune every requested operation/datatype pair on args.device and
dump the resulting kernel profiles (and, optionally, a decision-tree
predictor) to args.out as JSON."""
device = args.device
context = atd.context(device)
context.queues.append(atd.command_queue(context, device))
# Resume from an existing output file so earlier results are preserved.
if os.path.isfile(args.out):
json_out = json.load(open(args.out, 'r'))
else:
json_out = {}
json_out["version"] = "1.0"
def map_to_list(T, x):
# Coerce a scalar-or-list into a list of T.
return list(map(T, x if isinstance(x, list) else [x]))
# 'simple' method: one fixed problem size per operation.
if(args.method=='simple'):
default_tuning_sizes = {'vaxpy': args.blas1_size, 'dot': args.blas1_size,
'maxpy' : args.blas2_size, 'gemv' : args.blas2_size,
'gemm': args.blas3_size}
for operation in ['vaxpy', 'dot', 'maxpy', 'gemv', 'gemm']:
for datatype in [atd.float32, atd.float64]:
dtypestr = datatype.__name__
# Skip operations not requested (either bare or dtype-qualified form).
if operation not in args.operations and operation + '-' + dtypestr not in args.operations:
continue
#Check data-type
if datatype is atd.float64 and not device.double_fp_config:
sys.stderr.write('Warning : The device ' + device.name + ' does not support double precision! Skipping ...')
continue
#~ #Helper for execution
# With explicit parameters: benchmark that parameterization once.
# Otherwise: run the genetic optimizer, logging candidates to fname.
def execute(symbolic, sizes, Template, parameters = None, fname = os.devnull):
if parameters is not None:
return misc_tools.benchmark(Template(*parameters), symbolic)
with open(fname, "w+") as archive:
return optimize.genetic(symbolic, Template, lambda t: TYPES[operation]['perf-index']([datatype(0).size, sizes, t]),
TYPES[operation]['perf-measure'], archive)
def log_spaced_points(a,b,N,r=128):
# N log-spaced sizes in [a, b], rounded up to multiples of r.
t = np.ceil(np.exp(np.linspace(np.log(a), np.log(b), N))/r)*r
return t.reshape(t.size,1).astype(int)
#Helper for tuning
def tune(execution_handler, layouts, tuning_sizes, training_sizes):
print('-----')
print(' '.join(map(str, ("Now tuning:", dtypestr, '-', operation, '-'.join(layouts), '[' + device.name, '(' + device.platform.name + ')]'))))
#Update JSON
full_operation = operation + ''.join(layouts)
prefix = os.path.join('data',os.path.join(full_operation,dtypestr))
if not os.path.exists(prefix):
os.makedirs(prefix)
if full_operation not in json_out:
json_out[full_operation] = {}
json_out[full_operation][dtypestr] = {}
D = json_out[full_operation][dtypestr]
if args.method == 'simple':
print 'Size : ', ','.join(map(str, default_tuning_sizes[operation]))
profiles = [execution_handler(map(int,default_tuning_sizes[operation]))]
else:
def compute_perf(x, t):
return TYPES[operation]['perf-index']([datatype(0).size, x, t])
#profiles = dataset.sample_profiles(execution_handler, tuning_sizes)
if args.build_model:
# NOTE(review): sampling is commented out above -- profiles/X/Y are
# read back from a previous run's CSV dumps; verify the files exist.
#X, Y, profiles = dataset.sample_dataset(prefix, profiles, execution_handler, training_sizes)
profiles = np.loadtxt(prefix+'/profiles.csv')
X = np.loadtxt(prefix+'/X.csv',ndmin=2)
Y = np.loadtxt(prefix+'/Y.csv',ndmin=2)
clf = train_model(X, Y, profiles, compute_perf, TYPES[operation]['perf-measure'])
# Serialize every fitted decision tree of the forest into JSON.
D['predictor'] = [{'children_left': e.tree_.children_left.tolist(),
'children_right': e.tree_.children_right.tolist(),
'threshold': e.tree_.threshold.astype('float64').tolist(),
'feature': e.tree_.feature.astype('float64').tolist(),
'value': e.tree_.value[:,:,0].astype('float64').tolist()} for e in clf.estimators_]
D['profiles'] = [map(int, x) for x in profiles]
Template = TYPES[operation]['template']
#Vector AXPY
if operation=='vaxpy':
def execution_handler(sizes, fname=os.devnull, parameters=None):
x = atd.empty(sizes[0], datatype, context=context)
y = atd.empty(sizes[0], datatype, context=context)
return execute(x + y, sizes, Template, parameters, fname)
tune(execution_handler, (), log_spaced_points(1e4, 1e7, 20), log_spaced_points(1e4, 1e7, 1000))
#Dot
if operation=='dot':
def execution_handler(sizes, fname=os.devnull, parameters=None):
x = atd.empty(sizes[0], datatype, context=context)
y = atd.empty(sizes[0], datatype, context=context)
s = atd.scalar(datatype)
return execute(atd.dot(x, y), sizes, Template, parameters, fname)
tune(execution_handler, (), log_spaced_points(1e4, 1e7, 50), log_spaced_points(1e4, 1e7, 1000))
#Matrix AXPY
if operation=='maxpy':
def execution_handler(sizes, fname=os.devnull, parameters=None):
A = atd.empty(sizes, datatype, context=context)
C = atd.empty(sizes, datatype, context=context)
return execute(A + C, sizes, Template, parameters, fname)
# NOTE(review): this call does not match tune()'s signature
# (execution_handler, layouts, tuning_sizes, training_sizes) -- confirm.
tune(execution_handler, 64, 5000, 2, (),'log', 'log')
#Row-wise dot
if operation=='gemv':
for A_trans in args.gemv_layouts:
def execution_handler(sizes, fname=os.devnull, parameters=None):
A = atd.empty(sizes if A_trans=='N' else sizes[::-1], datatype, context=context)
x = atd.empty(sizes[1], datatype, context=context)
LHS = A if A_trans=='N' else A.T
return execute(atd.dot(LHS, x), sizes, Template[A_trans], parameters, fname)
tuning_sizes = itertools.chain( itertools.product([128, 512, 2048, 8192], [128, 512, 2048, 8192]),
itertools.product([128, 512, 2048, 8192], [16384, 32768, 65536]),
itertools.product([16384, 32768, 65536], [128, 512, 2048, 8192]))
training_sizes = itertools.chain( itertools.product([2**k for k in range(4, 13)], [2**k for k in range(4, 13)]),
itertools.product([2**k for k in range(4, 13)], [2**k for k in range(13, 17)]),
itertools.product([2**k for k in range(13, 17)], [2**k for k in range(4, 13)]))
tune(execution_handler, (A_trans,), tuning_sizes, training_sizes)
#Matrix Product
if operation=='gemm':
for L in args.gemm_layouts:
A_trans = L[0]
B_trans = L[1]
def execution_handler(sizes, fname=os.devnull, parameters=None):
A = atd.empty((sizes[0], sizes[2]) if A_trans=='N' else (sizes[2], sizes[0]), datatype, context=context)
B = atd.empty((sizes[2], sizes[1]) if B_trans=='N' else (sizes[1], sizes[2]), datatype, context=context)
LHS = A if A_trans=='N' else A.T
RHS = B if B_trans=='N' else B.T
return execute(atd.dot(LHS, RHS), sizes, Template[(A_trans, B_trans)], parameters, fname)
tuning_sizes = itertools.product([64, 256, 1024, 2560], [64, 256, 1024, 2560], [256, 2560, 32768, 65536])
training_sizes = itertools.product([2**k for k in range(6, 13)], [2**k for k in range(6, 13)], [2**k for k in range(6, 17)])
tune(execution_handler,(A_trans,B_trans), tuning_sizes, training_sizes)
# Persist everything gathered so far.
json.dump(json_out, open(args.out,'w'))
class ArgumentsHandler:
"""Parses the command line (list-devices / tune subcommands) and converts
the raw argument strings into the objects do_tuning() expects."""
def __init__(self, devices):
#Command line arguments
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest='action')
print_devices_parser = subparsers.add_parser('list-devices', help='List the devices available')
tune_parser = subparsers.add_parser('tune', help='Auto-tuning')
tune_parser.add_argument("--device", default=0, type=int)
tune_parser.add_argument("--operations", default = 'vaxpy,maxpy,dot,gemv,gemm-float32', type=str)
tune_parser.add_argument("--gemm-layouts", default='NN,NT,TN,TT', type=str)
tune_parser.add_argument("--gemv-layouts", default='N,T', type=str)
tune_parser.add_argument("--out", default='', type=str)
tune_parser.add_argument("--viennacl-src-path", default='', type=str)
# 'simple' tunes one fixed size per operation; 'full' samples many sizes.
tune_subparsers = tune_parser.add_subparsers(dest='method')
simple_parser = tune_subparsers.add_parser('simple', help = 'Tune each operation for unique sizes')
simple_parser.add_argument("--blas1-size", default = 10e6, type=int)
simple_parser.add_argument("--blas2-size", nargs=2, default=[2560,2560], type=int)
simple_parser.add_argument("--blas3-size", nargs=3, default=[1536,1536,1536],type=int)
full_parser = tune_subparsers.add_parser('full', help = 'Tune each operation for randomly chosen sizes')
full_parser.add_argument("--build-model", default=True, type=bool)
full_parser.add_argument("--sample-size", default=64, type=int)
args = parser.parse_args()
# Expose every parsed argument directly as an attribute of this handler.
self.__dict__ = args.__dict__.copy()
if self.action == 'tune':
#Retypes
# Resolve the device index into the actual device object.
self.device = devices[int(self.device)]
# Default output filename is derived from the device name.
if not self.out:
self.out = misc_tools.sanitize_string(self.device.name) + '.json'
self.operations = self.operations.split(',')
self.gemm_layouts = self.gemm_layouts.split(',')
self.gemv_layouts = self.gemv_layouts.split(',')
if self.method == 'simple':
self.blas1_size = [int(float(self.blas1_size))]
self.blas2_size = map(int, self.blas2_size)
self.blas3_size = map(int, self.blas3_size)
if __name__ == "__main__":
# Queue profiling must be enabled so kernel execution times can be read.
atd.state.queue_properties = atd.CL_QUEUE_PROFILING_ENABLE
platforms = atd.get_platforms()
# Flatten all devices across all platforms into a single indexable list.
devices = [d for platform in platforms for d in platform.get_devices()]
args = ArgumentsHandler(devices)
print("----------------")
print("Devices available:")
print("----------------")
for (i, d) in enumerate(devices):
print 'Device', i, '|', atd.device_type_to_string(d.type), '|', d.name, 'on', d.platform.name
print("----------------")
if args.action=='tune':
print("------")
print("Auto-tuning")
print("------")
do_tuning(args)

View File

@@ -1,57 +0,0 @@
import os
import sys
import re
import random
import numpy as np
def sample_profiles(execution_handler, generator):
"""Tune once per problem size from `generator` and collect the distinct
best profiles found; the profile tuned at the largest sampled size is
moved to the front of the returned list."""
print "Sampling profiles..."
t = np.empty(0)
profiles = []
for i, x in enumerate(generator):
print x
# Allocate the feature matrix lazily, once the size dimensionality is known.
if i==0:
X = np.empty((0,len(x)))
try:
y = execution_handler(x)
# NOTE(review): bare except silently drops any size whose tuning failed.
except:
continue
if y not in profiles:
profiles.append(y)
idx = profiles.index(y)
X = np.vstack((X, x))
t = np.append(t, idx)
# Move the profile associated with the largest sampled size to the front.
idx = int(t[np.argmax(np.linalg.norm(X, axis=1))])
profiles = [profiles[idx]] + [x for i,x in enumerate(profiles) if i!=idx]
return profiles
def sample_dataset(prefix_name, profiles, execution_handler, generator):
"""Benchmark every profile on every size from `generator`, producing the
training matrices X (problem sizes) and Y (one timing column per profile),
and dump X/Y/profiles as CSV under `prefix_name`."""
P = len(profiles)
print "Generating the dataset..."
Y = np.empty((0, P))
for i,x in enumerate(generator):
# Allocate the feature matrix lazily, once the size dimensionality is known.
if i==0:
X = np.empty((0,len(x)))
new_y = np.zeros(P)
for j,y in enumerate(profiles):
try:
new_y[j] = execution_handler(x, os.devnull, y)
# Profiles that fail on this size are assigned an infinite time.
except:
new_y[j] = float('inf')
X = np.vstack((X, x))
Y = np.vstack((Y, new_y))
if i%10==0:
sys.stdout.write('%d data points generated\r'%i)
sys.stdout.flush()
# Reorder the profile columns by their timing at the largest sampled size.
idx = np.argsort(Y[np.argmax(np.linalg.norm(X, axis=1)),:])
Y = Y[:, idx]
profiles = [profiles[i] for i in idx]
if not os.path.exists(prefix_name):
os.makedirs(prefix_name)
np.savetxt(os.path.join(prefix_name,"X.csv"), X)
np.savetxt(os.path.join(prefix_name,"Y.csv"), Y)
np.savetxt(os.path.join(prefix_name,"profiles.csv"), profiles)
return X, Y, profiles

View File

@@ -1,205 +0,0 @@
import random, time, sys, copy
import misc_tools
import numpy as np
import pyisaac as atd
from deap import algorithms
from deap import base
from deap import creator
from deap import tools as deap_tools
from collections import OrderedDict as odict
def closest_divisor(N, x):
    """Return the divisor of `N` nearest to `x` (ties go to the larger one)."""
    # Clamp the starting point into [1, N], then scan outward in both directions.
    start = max(1, min(round(x), N))
    lo = start
    while N % lo > 0 and lo > 0:
        lo -= 1
    hi = start
    while N % hi > 0 and hi < N:
        hi += 1
    # Pick whichever candidate is strictly closer; prefer `hi` on a tie.
    return lo if x - lo < hi - x else hi
def b_gray_to_bin(A='00000000', endian='big'):
    """Convert a Gray-coded bit string to plain binary, same endianness."""
    assert type(endian) is str
    assert endian == 'little' or endian == 'big'
    # Work in big-endian order; undo the reversal at the end if needed.
    bits = A[::-1] if endian == 'little' else A
    out = bits[0]
    for i in range(1, len(bits)):
        # Each binary bit is the XOR of the previous binary bit with the
        # current Gray bit.
        out += str(int(out[i - 1] != bits[i]))
    return out[::-1] if endian == 'little' else out
class GeneticOperators(object):
"""DEAP-based genetic search over a kernel template's parameter space.

Individuals are genomes mixing gray-coded bit strings (integer parameters)
and categorical genes (fetch policies); decode() turns a genome into
concrete template parameters, evaluate() benchmarks them with caching, and
optimize() runs a (mu+lambda)-style loop under a wall-clock budget."""
class Pow2(object):
def __init__(self, v):
self.value = v
@property
# NOTE(review): this property takes no `self` argument yet references
# self.value; it appears unused/broken -- confirm before relying on it.
def decoded():
return 2**self.value
def __init__(self, symbolic, Template, out):
self.device = symbolic.context.queues[0].device
self.symbolic = symbolic
self.Template = Template
self.cache = {}
self.out = out
# Genome layout per template: each entry is either a bit-width (number of
# gray-coded bits for one integer parameter) or fetching_policy_type (a
# single categorical gene).
self.genome_info = {
atd.vaxpy: [2,4,4,atd.fetching_policy_type],
atd.reduction: [2,4,4,atd.fetching_policy_type],
atd.maxpy: [2,3,3,3,3,atd.fetching_policy_type],
atd.mreduction_rows: [2,3,3,3,3,atd.fetching_policy_type],
atd.mreduction_cols: [2,3,3,3,3,atd.fetching_policy_type],
atd.mproduct_nn: [2,3,3,3,3,3,3,3,atd.fetching_policy_type,atd.fetching_policy_type,3],
atd.mproduct_nt: [2,3,3,3,3,3,3,3,atd.fetching_policy_type,atd.fetching_policy_type,3],
atd.mproduct_tn: [2,3,3,3,3,3,3,3,atd.fetching_policy_type,atd.fetching_policy_type,3],
atd.mproduct_tt: [2,3,3,3,3,3,3,3,atd.fetching_policy_type,atd.fetching_policy_type,3]
}[Template]
# Per-gene mutation probability: one expected flip per genome.
self.indpb = 1.0/sum([1 if x==atd.fetching_policy_type else x for x in self.genome_info])
creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
creator.create("Individual", list, fitness=creator.FitnessMin)
self.toolbox = base.Toolbox()
self.toolbox.register("population", self.init)
self.toolbox.register("evaluate", self.evaluate)
self.toolbox.register("mate", deap_tools.cxTwoPoint)
self.toolbox.register("mutate", self.mutate)
self.toolbox.register("select", deap_tools.selNSGA2)
def decode(self, genome):
"""Translate a genome into a concrete template parameter list."""
fetching_policy_type = atd.fetching_policy_type
fetch = [fetching_policy_type.FETCH_FROM_LOCAL, fetching_policy_type.FETCH_FROM_GLOBAL_STRIDED, fetching_policy_type.FETCH_FROM_GLOBAL_CONTIGUOUS]
is_gemm = self.Template in [atd.mproduct_nn, atd.mproduct_nt, atd.mproduct_tn, atd.mproduct_tt]
result = []
offset = 0
for i, x in enumerate(self.genome_info):
if x==atd.fetching_policy_type:
# Categorical gene: an index into the fetch-policy list.
result.append(fetch[genome[offset]])
offset = offset + 1
else:
# Gray-decode the next x bits; most parameters are powers of two.
decoded = int(b_gray_to_bin(''.join(genome[offset:offset+x])), 2)
result.append(decoded if is_gemm and i in [11, 12] else 2**decoded)
offset = offset + x
#GEMM peculiarities
if is_gemm:
if fetching_policy_type.FETCH_FROM_LOCAL in result:
lf1 = result[1]*result[3]/result[10]
else:
result[10] = 0
lf1 = 0
result.append(lf1)
return result
def init(self, N):
"""Build an initial population of N valid individuals, split evenly over
the allowed fetch-policy indices for this template."""
result = []
allowed_idx = [0] if self.Template in [atd.mproduct_nn, atd.mproduct_nt, atd.mproduct_tn, atd.mproduct_tt] else [1,2]
for idx in allowed_idx:
current = []
while len(current) < N/len(allowed_idx):
# Rejection-sample random genomes until one is benchmarkable.
while True:
bincode = []
for i, x in enumerate(self.genome_info):
if x==atd.fetching_policy_type:
bincode = bincode + [idx]
else:
bincode = bincode + [str(random.randint(0,1)) for i in range(x)]
parameters = self.decode(bincode)
template = self.Template(*parameters)
array_expressions = atd.array_expression_container(self.symbolic)
registers_usage = template.registers_usage(array_expressions)/4
lmem_usage = template.lmem_usage(array_expressions)
local_size = parameters[1]*parameters[3]
occupancy_record = misc_tools.OccupancyRecord(self.device, local_size, lmem_usage, registers_usage)
if not misc_tools.skip(template, self.symbolic):
current.append(creator.Individual(bincode))
break
result = result + current
return result
def mutate(self, individual):
"""Flip genes at random until a valid (non-skipped) variant is produced."""
while True:
new_individual = copy.deepcopy(individual)
for i in range(len(new_individual)):
# Integer genes are categorical (fetch policy); re-draw in [0, 2].
if isinstance(individual[i], int) and random.random() < 0.1:
while new_individual[i] == individual[i]:
new_individual[i] = random.randint(0, 2)
# String genes are single bits; flip them with probability indpb.
elif not isinstance(individual[i], int) and random.random() < self.indpb:
new_individual[i] = '1' if new_individual[i]=='0' else '0'
parameters = self.decode(new_individual)
template = self.Template(*parameters)
if not misc_tools.skip(template, self.symbolic):
break
return new_individual,
def evaluate(self, individual):
"""Benchmark the decoded parameters (cached); logs each new timing to
self.out and returns a 1-tuple for DEAP fitness."""
if tuple(individual) not in self.cache:
parameters = self.decode(individual)
template = self.Template(*parameters)
tt = misc_tools.benchmark(template, self.symbolic)
self.out.write(','.join([str(tt)]+map(str,map(int,parameters)))+'\n')
self.cache[tuple(individual)] = tt
return self.cache[tuple(individual)],
def optimize(self, maxtime, maxgen, compute_perf, perf_metric):
"""Run the evolutionary loop until `maxtime` ('MmSs' string) or `maxgen`
generations elapse; returns the decoded parameters of the best individual."""
hof = deap_tools.HallOfFame(1)
# Begin the generational process
gen = 0
maxtime = time.strptime(maxtime, '%Mm%Ss')
maxtime = maxtime.tm_min*60 + maxtime.tm_sec
start_time = time.time()
mu = 30
cxpb = 0.2
mutpb = 0.7
population = self.init(mu)
invalid_ind = [ind for ind in population if not ind.fitness.valid]
fitnesses = self.toolbox.map(self.evaluate, invalid_ind)
for ind, fit in zip(invalid_ind, fitnesses):
ind.fitness.values = fit
hof.update(population)
while time.time() - start_time < maxtime and gen < maxgen:
# Vary the population
offspring = []
for _ in xrange(mu):
op_choice = random.random()
if op_choice < cxpb: # Apply crossover
# Retry crossover until the child is benchmarkable.
while True:
ind1, ind2 = map(self.toolbox.clone, random.sample(population, 2))
ind1, ind2 = self.toolbox.mate(ind1, ind2)
del ind1.fitness.values
parameters = self.decode(ind1)
template = self.Template(*parameters)
if not misc_tools.skip(template, self.symbolic):
break
offspring.append(ind1)
elif op_choice < cxpb + mutpb: # Apply mutation
ind = self.toolbox.clone(random.choice(population))
ind, = self.toolbox.mutate(ind)
del ind.fitness.values
offspring.append(ind)
else: # Apply reproduction
offspring.append(random.choice(population))
#for x in offspring:
#print self.decode(x)
# Evaluate the individuals with an invalid fitness
invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
fitnesses = self.toolbox.map(self.evaluate, invalid_ind)
for ind, fit in zip(invalid_ind, fitnesses):
ind.fitness.values = fit
# Update the hall of fame with the generated individuals
hof.update(offspring)
# Select the next generation population
population[:] = self.toolbox.select(population + offspring, mu)
#Update
gen = gen + 1
best_profile = '(%s)'%','.join(map(str,self.decode(hof[0])))
best_performance = compute_perf(hof[0].fitness.values[0])
sys.stdout.write('Generation %d | Time %d | Best %d %s [ for %s ]\r'%(gen, time.time() - start_time, best_performance, perf_metric, best_profile))
sys.stdout.flush()
sys.stdout.write('\n')
return self.decode(hof[0])

View File

@@ -1,246 +0,0 @@
from __future__ import division
import time
import os
import sys
import pyisaac as atd
import numpy as np
class PhysicalLimitsNV:
"""Per-compute-capability hardware limits for NVIDIA GPUs (warps, registers,
shared memory per multiprocessor), as used by the occupancy computation."""
def __init__(self, dev):
self.compute_capability = dev.nv_compute_capability
# Tesla (compute capability 1.x)
if self.compute_capability[0]==1:
if self.compute_capability[1]<=1:
self.warps_per_mp = 24
self.threads_per_mp = 768
self.num_32b_reg_per_mp = 8192
self.reg_alloc_unit_size = 256
else:
self.warps_per_mp = 32
self.threads_per_mp = 1024
self.num_32b_reg_per_mp = 16384
self.reg_alloc_unit_size = 512
self.threads_per_warp = 32
self.thread_blocks_per_mp = 8
self.reg_alloc_granularity = 'block'
self.reg_per_thread = 124
self.shared_mem_per_mp = 16384
self.shared_mem_alloc_unit_size = 512
self.warp_alloc_granularity = 2
self.max_thread_block_size = 512
# Fermi (compute capability 2.x)
elif self.compute_capability[0]==2:
self.threads_per_warp = 32
self.warps_per_mp = 48
self.threads_per_mp = 1536
self.thread_blocks_per_mp = 8
self.num_32b_reg_per_mp = 32768
self.reg_alloc_unit_size = 64
self.reg_alloc_granularity = 'warp'
self.reg_per_thread = 63
self.shared_mem_per_mp = 49152
self.shared_mem_alloc_unit_size = 128
self.warp_alloc_granularity = 2
self.max_thread_block_size = 1024
# Kepler (compute capability 3.x)
elif self.compute_capability[0]==3:
self.threads_per_warp = 32
self.warps_per_mp = 64
self.threads_per_mp = 2048
self.thread_blocks_per_mp = 16
self.num_32b_reg_per_mp = 65536
self.reg_alloc_unit_size = 256
self.reg_alloc_granularity = 'warp'
if(self.compute_capability[1]==5):
self.reg_per_thread = 255
else:
self.reg_per_thread = 63
self.shared_mem_per_mp = 49152
self.shared_mem_alloc_unit_size = 256
self.warp_alloc_granularity = 4
self.max_thread_block_size = 1024
elif self.compute_capability[0]==5: #[KR]: copy-pasted from Kepler and adjusted according to http://en.wikipedia.org/wiki/CUDA
self.threads_per_warp = 32
self.warps_per_mp = 64
self.threads_per_mp = 2048
self.thread_blocks_per_mp = 32
self.num_32b_reg_per_mp = 65536
self.reg_alloc_unit_size = 256
self.reg_alloc_granularity = 'warp'
self.reg_per_thread = 255
self.shared_mem_per_mp = 65536
self.shared_mem_alloc_unit_size = 256
self.warp_alloc_granularity = 4
self.max_thread_block_size = 1024
else:
raise Exception('Compute capability not supported!')
class PhysicalLimitsAMD:
"""Per-device hardware limits for AMD GPUs, keyed by the device's code name.
WFmax_cu: max wavefronts per compute unit; LDS_cu: local data share bytes
per CU; GPR_cu: general-purpose registers per CU; arch: 'VLIW' or 'GCN'."""
def __init__(self, dev):
infos =\
{
#APU:
'Devastator': {'arch': 'VLIW', 'WFmax_cu': 96, 'LDS_cu': 32768, 'GPR_cu': 8192},
'Scrapper': {'arch': 'VLIW', 'WFmax_cu': 96, 'LDS_cu': 32768, 'GPR_cu': 8192},
#HD5000
'Cedar': {'arch': 'VLIW', 'WFmax_cu': 96, 'LDS_cu': 32768, 'GPR_cu': 8192},
'Redwood': {'arch': 'VLIW', 'WFmax_cu': 62, 'LDS_cu': 32768, 'GPR_cu': 16384},
'Juniper': {'arch': 'VLIW', 'WFmax_cu': 24.8, 'LDS_cu': 32768, 'GPR_cu': 16384},
'Cypress': {'arch': 'VLIW', 'WFmax_cu': 27.6, 'LDS_cu': 32768, 'GPR_cu': 16384},
'Hemlock': {'arch': 'VLIW', 'WFmax_cu': 24.8, 'LDS_cu': 32768, 'GPR_cu': 16384},
#HD6000
'Seymour': {'arch': 'VLIW', 'WFmax_cu': 96, 'LDS_cu': 32768, 'GPR_cu': 16384},
'Caicos': {'arch': 'VLIW', 'WFmax_cu': 96, 'LDS_cu': 32768, 'GPR_cu': 16384},
'Turks': {'arch': 'VLIW', 'WFmax_cu': 41.3, 'LDS_cu': 32768, 'GPR_cu': 16384},
'Whistler': {'arch': 'VLIW', 'WFmax_cu': 41.3, 'LDS_cu': 32768, 'GPR_cu': 16384},
'Barts': {'arch': 'VLIW', 'WFmax_cu': 49.6, 'LDS_cu': 32768, 'GPR_cu': 16384},
#HD7000
'Capeverde': {'arch': 'GCN', 'WFmax_cu': 40, 'LDS_cu': 65536, 'GPR_cu': 65536},
'Pitcairn': {'arch': 'GCN', 'WFmax_cu': 40, 'LDS_cu': 65536, 'GPR_cu': 65536},
'Bonaire': {'arch': 'GCN', 'WFmax_cu': 40, 'LDS_cu': 65536, 'GPR_cu': 65536},
'Tahiti': {'arch': 'GCN', 'WFmax_cu': 40, 'LDS_cu': 65536, 'GPR_cu': 65536},
#Rx 200
'Oland': {'arch': 'GCN', 'WFmax_cu': 40, 'LDS_cu': 65536, 'GPR_cu': 65536},
'Tonga': {'arch': 'GCN', 'WFmax_cu': 40, 'LDS_cu': 65536, 'GPR_cu': 65536},
'Hawaii': {'arch': 'GCN', 'WFmax_cu': 40, 'LDS_cu': 65536, 'GPR_cu': 65536}
}
# Wavefront size is 64 on all of the devices listed above.
self.WFsize = 64
# NOTE(review): raises KeyError for device names not in the table -- confirm
# that is the intended behavior for unknown AMD devices.
self.WFmax_cu = infos[dev.name]['WFmax_cu']
self.LDS_cu = infos[dev.name]['LDS_cu']
self.GPR_cu = infos[dev.name]['GPR_cu']
self.arch = infos[dev.name]['arch']
pass
def _int_floor(value, multiple_of=1):
"""Round C{value} down to be a C{multiple_of} something."""
# Mimicks the Excel "floor" function (for code stolen from occupancy calculator)
from math import floor
return int(floor(value/multiple_of))*multiple_of
def _int_ceiling(value, multiple_of=1):
"""Round C{value} up to be a C{multiple_of} something."""
# Mimicks the Excel "floor" function (for code stolen from occupancy calculator)
from math import ceil
return int(ceil(value/multiple_of))*multiple_of
class OccupancyRecord:
"""Estimates multiprocessor occupancy (percent) for a kernel launch from its
work-group size, shared/local memory usage and register usage, dispatching
on the device vendor."""
def init_nvidia(self, dev, threads, shared_mem, registers):
"""CUDA-occupancy-calculator style estimate for NVIDIA devices."""
pl = PhysicalLimitsNV(dev)
limits = []
allocated_warps = max(1,_int_ceiling(threads/pl.threads_per_warp))
max_warps_per_mp = pl.warps_per_mp
# Limit imposed by warps/blocks per multiprocessor.
limits.append((min(pl.thread_blocks_per_mp, _int_floor(max_warps_per_mp/allocated_warps)), 'warps'))
# Limit imposed by register usage (0 blocks if over the per-thread cap).
if registers>0:
if registers > pl.reg_per_thread:
limits.append((0, 'registers'))
else:
allocated_regs = {'warp': allocated_warps,
'block': _int_ceiling(_int_ceiling(allocated_warps, pl.warp_alloc_granularity)*registers*pl.threads_per_warp,allocated_warps)}[pl.reg_alloc_granularity]
max_reg_per_mp = {'warp': _int_floor(pl.num_32b_reg_per_mp/_int_ceiling(registers*pl.threads_per_warp, pl.reg_alloc_unit_size), pl.warp_alloc_granularity),
'block':pl.num_32b_reg_per_mp}[pl.reg_alloc_granularity]
limits.append((_int_floor(max_reg_per_mp/allocated_regs), 'registers'))
# Limit imposed by shared memory usage.
if shared_mem>0:
allocated_shared_mem = _int_ceiling(shared_mem, pl.shared_mem_alloc_unit_size)
max_shared_mem_per_mp = pl.shared_mem_per_mp
limits.append((_int_floor(max_shared_mem_per_mp/allocated_shared_mem), 'shared memory'))
# The tightest limit determines occupancy.
limit, limited_by = min(limits)
warps_per_mp = limit*allocated_warps
self.occupancy = 100*warps_per_mp/pl.warps_per_mp
def init_amd(self, dev, threads, shared_mem, NReg):
"""Wavefront-based occupancy estimate for AMD devices."""
pl = PhysicalLimitsAMD(dev)
limits = {}
# Wavefronts per work group.
WFwg = _int_ceiling(threads/pl.WFsize)
#WFmax without constraint
if pl.arch=='VLIW':
limits['wg'] = pl.WFmax_cu if WFwg > pl.WFmax_cu else _int_floor(pl.WFmax_cu,WFwg)
else:
limits['wg'] = min(16*WFwg, pl.WFmax_cu)
#WFmax with LDS constraints
if shared_mem > 0:
WGmax = _int_floor(pl.LDS_cu/shared_mem)
limits['lds'] = WGmax*WFwg
#WFmax with GPR constraints
if NReg > 0:
#Amount of work group per CU
NRegWG = NReg*pl.WFsize*WFwg
WGmax = _int_floor(pl.GPR_cu/NRegWG)
limits['gpr'] = WFwg*WGmax
self.occupancy = 100.0*min(list(limits.values()))/pl.WFmax_cu
def __init__(self, dev, threads, shared_mem=0, registers=0):
vendor = dev.vendor
if vendor == atd.vendor.AMD:
self.init_amd(dev, threads, shared_mem, registers)
elif vendor == atd.vendor.NVIDIA:
self.init_nvidia(dev, threads, shared_mem, registers)
elif vendor == atd.vendor.INTEL:
# Crude Intel heuristic: register-starved kernels get zero occupancy.
if registers>128:
self.occupancy = 0
else:
self.occupancy = 100
# NOTE(review): for any other vendor, self.occupancy is never set -- later
# reads would raise AttributeError; confirm all vendors are covered.
def skip(template, symbolic):
"""Return True when a parameterization is not worth benchmarking: work-group
size not a multiple of the vendor's execution width, an invalid template,
or an estimated occupancy below 10%."""
device = symbolic.context.queues[0].device
local_size = template.local_size_0*template.local_size_1
vendor = device.vendor
if vendor == atd.vendor.AMD and local_size%64!=0:
return True
elif vendor == atd.vendor.NVIDIA and local_size%32!=0:
return True
# NOTE(review): `vendor == atd.vendor.INTEL in vendor` is a chained
# comparison; it looks like a typo for `vendor == atd.vendor.INTEL` -- confirm.
elif vendor == atd.vendor.INTEL in vendor and local_size%8!=0:
return True
array_expressions = atd.array_expression_container(symbolic)
# registers_usage appears to be reported in bytes and converted to 32-bit
# registers here -- verify against the pyisaac API.
registers_usage = template.registers_usage(array_expressions)/4
lmem_usage = template.lmem_usage(array_expressions)
occupancy_record = OccupancyRecord(device, local_size, lmem_usage, registers_usage)
if template.is_invalid(array_expressions, device) or occupancy_record.occupancy < 10:
return True
return False
def benchmark(template, symbolic):
"""Time `template` executing `symbolic` and return a duration in seconds
(float('inf') when the predicted occupancy is below 15%).
NOTE(review): returns the max of the collected timings, not min/median --
confirm that is intentional."""
queue = symbolic.context.queues[0]
device = queue.device
array_expressions = atd.array_expression_container(symbolic)
registers_usage = template.registers_usage(array_expressions)/4
lmem_usage = template.lmem_usage(array_expressions)
local_size = template.local_size_0*template.local_size_1
occupancy_record = OccupancyRecord(device, local_size, lmem_usage, registers_usage)
# Do not bother timing hopeless parameterizations.
if occupancy_record.occupancy < 15 :
return float("inf")
else:
# Force the queue to use this template for the benchmarked expression.
queue.models[template, atd.float32] = atd.model(atd.float32, template, queue)
timings = []
current_time = 0
# Warm-up run (compiles and caches the kernel).
x, events = atd.flush(symbolic)
symbolic.context.queues[0].synchronize()
# Accumulate runs until at least 1 ms of total kernel time is measured.
while current_time < 1e-3:
x, events = atd.flush(symbolic)
symbolic.context.queues[0].synchronize()
# Event times are reported in nanoseconds; convert to seconds.
timings.append(1e-9*sum([e.elapsed_time for e in events]))
current_time = current_time + timings[-1]
return np.max(timings)
def sanitize_string(string, keep_chars = ['_']):
    """Lower-case `string`, map spaces and hyphens to underscores, and drop
    every character that is neither alphanumeric nor in `keep_chars`."""
    normalized = string.lower().replace(' ', '_').replace('-', '_')
    kept = [c for c in normalized if c.isalnum() or c in keep_chars]
    return "".join(kept).rstrip()

View File

@@ -1,46 +0,0 @@
from sklearn import tree
from sklearn import ensemble
from sklearn.grid_search import GridSearchCV
import numpy as np
def gmean(a, axis=0, dtype=None):
    """Geometric mean of `a` along `axis`: exp of the mean of the logs."""
    if not isinstance(a, np.ndarray):
        # Plain sequences are materialized as an ndarray first.
        logs = np.log(np.array(a, dtype=dtype))
    elif dtype:
        # Honor the requested dtype, keeping masked arrays masked.
        if isinstance(a, np.ma.MaskedArray):
            logs = np.log(np.ma.asarray(a, dtype=dtype))
        else:
            logs = np.log(np.asarray(a, dtype=dtype))
    else:
        logs = np.log(a)
    return np.exp(logs.mean(axis=axis))
def nrmse(y_ground, y):
    """Root-mean-square error of `y` against `y_ground`, normalized by the
    range (max - min) of the ground truth."""
    residual = y_ground - y
    # Note: normalized by y.size, matching the original definition.
    rmsd = np.sqrt(np.sum(residual ** 2) / y.size)
    span = np.max(y_ground) - np.min(y_ground)
    return rmsd / span
def train_model(X, Y, profiles, perf, metric):
"""Grid-search a RandomForestRegressor (n_estimators x max_depth) that
predicts per-profile performance from problem sizes; returns the forest
with the lowest NRMSE on a 10% hold-out split."""
# Shuffle so the train/validation split is random.
p = np.random.permutation(X.shape[0])
X = X[p,:]
Y = Y[p,:]
# Convert raw timings to the performance metric; failed runs (inf) become 0.
Y = np.array([perf(xx, yy) for xx, yy in zip(X, Y)])
Y[np.isinf(Y)] = 0
#Train the model
cut = int(0.9*X.shape[0])
XTr, YTr = X[:cut,:], Y[:cut,:]
XCv, YCv = X[cut:,:], Y[cut:,:]
nrmses = {}
for N in range(1,20):
for depth in range(1,20):
clf = ensemble.RandomForestRegressor(N, max_depth=depth).fit(XTr, YTr)
# Score by the performance of the profile the model would pick,
# against the best achievable profile per validation point.
t = np.argmax(clf.predict(XCv), axis = 1)
y = np.array([YCv[i,t[i]] for i in range(t.size)])
ground = np.max(YCv[:,:], axis=1)
nrmses[clf] = nrmse(ground, y)
# Keep the forest with the smallest validation NRMSE.
clf = min(nrmses, key=nrmses.get)
print 'The optimal classifer has NRMSE = %.2g (%d estimators and the max depth is %d'%(nrmses[clf], clf.n_estimators, clf.max_depth)
return clf

View File

@@ -1,9 +0,0 @@
import array, random, itertools
import deap.tools
import numpy as np
from genetic import GeneticOperators
def genetic(symbolic, Template, compute_perf, perf_metric, out):
    """Run the genetic auto-tuner for `Template` on `symbolic`.

    Builds a GeneticOperators search over the template's parameter space,
    logging candidates to `out`, and returns the best decoded profile found
    within the fixed time/generation budget.
    """
    operators = GeneticOperators(symbolic, Template, out)
    budget = dict(maxtime='5m0s', maxgen=10000)
    return operators.optimize(compute_perf=compute_perf, perf_metric=perf_metric, **budget)

Some files were not shown because too many files have changed in this diff Show More