Squashed feature branch:
* Added CUDA support
* Performance improvements
* API improvements
* Added "depth" parameter to GEMM
* Android cross-compilation
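The generator changes below stop hard-coding OpenCL spellings ("__kernel", "__global", "__local", "barrier(CLK_LOCAL_MEM_FENCE)", "get_local_id(0)") and instead go through backend-dispatching helpers declared in "isaac/backend/keywords.h" (KernelPrefix(backend), Global(backend), Local(backend), LocalBarrier(backend), LocalIdx0(backend), ...), so that the same source template can emit either OpenCL C or CUDA C. A minimal sketch of that dispatch idea follows; it is illustrative only: the enum, the plain std::string return type (the real helpers are objects whose .get() yields the string) and the CUDA spellings are assumptions, not isaac's actual definitions.

// Illustrative sketch only -- not the real isaac/backend/keywords.h.
// Shows how per-backend keyword helpers let one generator target OpenCL and CUDA.
#include <string>

enum class backend_type { OPENCL, CUDA };   // assumed enum; the diff uses driver::backend_type

// Qualifier that introduces a kernel entry point.
inline std::string KernelPrefix(backend_type b)
{ return b == backend_type::OPENCL ? "__kernel" : "extern \"C\" __global__"; }

// Address-space qualifier for pointers to global memory (empty on CUDA).
inline std::string Global(backend_type b)
{ return b == backend_type::OPENCL ? "__global" : ""; }

// Address-space qualifier for work-group local / shared memory.
inline std::string Local(backend_type b)
{ return b == backend_type::OPENCL ? "__local" : "__shared__"; }

// Work-group / thread-block barrier statement.
inline std::string LocalBarrier(backend_type b)
{ return b == backend_type::OPENCL ? "barrier(CLK_LOCAL_MEM_FENCE)" : "__syncthreads()"; }

// Index of the current work-item / thread along dimension 0.
inline std::string LocalIdx0(backend_type b)
{ return b == backend_type::OPENCL ? "get_local_id(0)" : "threadIdx.x"; }

With helpers of this kind, the stream << KernelPrefix(backend) << ... lines in the diff assemble the same kernel body for both backends; only the keyword strings differ.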
@@ -1,11 +1,12 @@
 #include <iostream>
-#include "atidlas/backend/stream.h"
-#include "atidlas/backend/templates/mreduction.h"
-#include "atidlas/tools/to_string.hpp"
-#include "atidlas/tools/make_map.hpp"
-#include "atidlas/tools/make_vector.hpp"
+#include "isaac/backend/stream.h"
+#include "isaac/backend/keywords.h"
+#include "isaac/backend/templates/mreduction.h"
+#include "isaac/tools/to_string.hpp"
+#include "isaac/tools/make_map.hpp"
+#include "isaac/tools/make_vector.hpp"
 
-namespace atidlas
+namespace isaac
 {
 
 mreduction_parameters::mreduction_parameters(unsigned int _simd_width,
@@ -14,7 +15,7 @@ mreduction_parameters::mreduction_parameters(unsigned int _simd_width,
   num_groups_0(_num_groups_0), num_groups_1(_num_groups_1), fetch_policy(_fetch_policy) { }
 
 
-int mreduction::check_invalid_impl(cl::Device const &, expressions_tuple const &) const
+int mreduction::is_invalid_impl(driver::Device const &, expressions_tuple const &) const
 {
   if (p_.fetch_policy==FETCH_FROM_LOCAL)
     return TEMPLATE_INVALID_FETCHING_POLICY_TYPE;
@@ -26,31 +27,46 @@ unsigned int mreduction::lmem_usage() const
   return p_.local_size_0*(p_.local_size_1+1);
 }
 
-std::string mreduction::generate_impl(unsigned int label, expressions_tuple const & expressions, std::vector<mapping_type> const & mappings, unsigned int simd_width, std::vector<mapped_mreduction*> const & exprs) const
+std::string mreduction::generate_impl(const char * suffix, expressions_tuple const & expressions, driver::Device const & device, std::vector<mapping_type> const & mappings) const
 {
   using tools::to_string;
 
+  std::vector<mapped_mreduction*> reductions;
+  expressions_tuple::data_type::const_iterator sit;
+  std::vector<mapping_type>::const_iterator mit;
+  for (mit = mappings.begin(), sit = expressions.data().begin(); mit != mappings.end(); ++mit, ++sit)
+  {
+    array_expression const & first_expression = *expressions.data().front();
+    std::vector<size_t> idx = filter_nodes(&is_reduction, first_expression, false);
+    for (auto & elem : idx)
+      reductions.push_back((mapped_mreduction*)(mit->at(mapping_key(elem, PARENT_NODE_TYPE)).get()));
+  }
+
   kernel_generation_stream stream;
+  driver::backend_type backend = device.backend();
+  std::string _size_t = size_type(device);
 
-  char kprefix[10];
-  fill_kernel_name(kprefix, label, "d");
+  char name[2][16] = {{"prod"}, {"reduce"}};
+  strcat(name[0], suffix);
+  strcat(name[1], suffix);
 
-  std::string arguments = "unsigned int M, unsigned int N, " ;
-  for (const auto & e : exprs)
+  std::string arguments = _size_t + " M, " + _size_t + " N, " ;
+  for (const auto & e : reductions)
   {
     std::string numeric_type = numeric_type_to_string(lhs_most(e->array_expression().tree(), e->array_expression().root()).lhs.dtype);
     if (e->is_index_reduction())
     {
-      arguments += e->process("__global unsigned int* #name_temp, ");
-      arguments += e->process("__global " + to_string(numeric_type) + "* #name_temp_value,");
+      arguments += e->process(Global(backend).get() + " unsigned int* #name_temp, ");
+      arguments += e->process(Global(backend).get() + " " + to_string(numeric_type) + "* #name_temp_value,");
     }
     else
-      arguments += e->process("__global " + to_string(numeric_type) + "* #name_temp, ");
+      arguments += e->process(Global(backend).get() + " " + to_string(numeric_type) + "* #name_temp, ");
   }
 
-  stream << " __attribute__((reqd_work_group_size(" << p_.local_size_0 << "," << p_.local_size_1 << ",1)))" << std::endl;
-  stream << "__kernel void " << kprefix << "0(" << arguments << generate_arguments("#scalartype", mappings, expressions) << ")" << std::endl;
+  if(backend==driver::OPENCL)
+    stream << " __attribute__((reqd_work_group_size(" << p_.local_size_0 << "," << p_.local_size_1 << ",1)))" << std::endl;
+
+  stream << KernelPrefix(backend) << " void " << name[0] << "(" << arguments << generate_arguments("#scalartype", device, mappings, expressions) << ")" << std::endl;
   stream << "{" << std::endl;
   stream.inc_tab();
 
@@ -63,33 +79,41 @@ std::string mreduction::generate_impl(unsigned int label, expressions_tuple cons
   unsigned int local_size_0_ld = p_.local_size_0+1;
   std::string local_size_0_ld_str = to_string(local_size_0_ld);
 
-  for (const auto & e : exprs)
-    stream << e->process("__local #scalartype #name_buf[" + to_string(p_.local_size_1*local_size_0_ld) + "];") << std::endl;
+  for (const auto & e : reductions)
+    stream << e->process(Local(backend).get() + " #scalartype #name_buf[" + to_string(p_.local_size_1*local_size_0_ld) + "];") << std::endl;
 
-  stream << "unsigned int lid0 = get_local_id(0);" << std::endl;
-  stream << "unsigned int lid1 = get_local_id(1);" << std::endl;
-  stream << "unsigned int upper_bound_1 = ( M +" << p_.local_size_1 - 1 << ")/" << p_.local_size_1 << "*" << p_.local_size_1 << ";" << std::endl;
-  stream << "for(unsigned int r = get_global_id(1); r < upper_bound_1; r += get_global_size(1)){" << std::endl;
+  stream << "" << _size_t << " lid0 = " << LocalIdx0(backend) << ";" << std::endl;
+  stream << "" << _size_t << " gid0 = " << GlobalIdx0(backend) << ";" << std::endl;
+  stream << "" << _size_t << " gpid0 = " << GroupIdx0(backend) << ";" << std::endl;
+  stream << "" << _size_t << " gsize0 = " << GlobalSize0(backend) << ";" << std::endl;
+
+  stream << "" << _size_t << " lid1 = " << LocalIdx1(backend) <<";" << std::endl;
+  stream << "" << _size_t << " gid1 = " << GlobalIdx1(backend) <<";" << std::endl;
+  stream << "" << _size_t << " gpid1 = " << GroupIdx1(backend) << ";" << std::endl;
+  stream << "" << _size_t << " gsize1 = " << GlobalSize1(backend) <<";" << std::endl;
+
+  stream << "" << _size_t << " upper_bound_1 = ( M +" << p_.local_size_1 - 1 << ")/" << p_.local_size_1 << "*" << p_.local_size_1 << ";" << std::endl;
+  stream << "for(" << _size_t << " r = gid1; r < upper_bound_1; r += gsize1){" << std::endl;
   stream.inc_tab();
 
-  for (const auto & e : exprs)
+  for (const auto & e : reductions)
     stream << e->process("#scalartype #name_acc = " + neutral_element((e)->root_op()) + ";") << std::endl;
 
   stream << "if (r < M)" << std::endl;
   stream << "{" << std::endl;
   stream.inc_tab();
 
-  element_wise_loop_1D(stream, p_.fetch_policy, simd_width, "c", "N", "get_global_id(0)", "get_global_size(0)", [&](unsigned int simd_width)
+  element_wise_loop_1D(stream, p_.fetch_policy, p_.simd_width, "c", "N", "gid0", "gsize0", device, [&](unsigned int simd_width)
   {
     std::string data_type = append_width("#scalartype",simd_width);
 
-    for (const auto & e : exprs)
+    for (const auto & e : reductions)
     {
       std::map<std::string, std::string> accessors;
       if(reduction_type_==REDUCE_COLUMNS)
       {
-        accessors["array2"] = data_type + " #namereg = " + vload(simd_width, "c*#stride1", "#pointer + r*#ld")+";";
-        accessors["repeat"] = data_type + " #namereg = " + vload(simd_width, "(c%#tuplearg0)*#stride", "#pointer + (r%#tuplearg1)*#stride ")+";";
+        accessors["array2"] = data_type + " #namereg = " + vload(simd_width, "#scalartype", "c*#stride1", "#pointer + r*#ld", backend)+";";
+        accessors["repeat"] = data_type + " #namereg = " + vload(simd_width, "#scalartype", "(c%#tuplearg0)*#stride", "#pointer + (r%#tuplearg1)*#stride ", backend)+";";
       }
       else
       {
@@ -106,10 +130,10 @@ std::string mreduction::generate_impl(unsigned int label, expressions_tuple cons
         str[0] = "#namereg";
       else
         for (unsigned int a = 0; a < simd_width; ++a)
-          str[a] = append_simd_suffix("#namereg.s",a);
+          str[a] = access_vector_type("#namereg",a);
 
 
-      for (auto & elem : exprs)
+      for (auto & elem : reductions)
         for (unsigned int a = 0; a < simd_width; ++a)
         {
           std::string value = elem->evaluate_recursive(LHS_NODE_TYPE, {{"array2", str[a]}, {"repeat", str[a]}, {"array0", "#namereg"}});
@@ -122,20 +146,20 @@ std::string mreduction::generate_impl(unsigned int label, expressions_tuple cons
   stream.dec_tab();
   stream << "}" << std::endl;
 
-  for (auto & expr : exprs)
+  for (auto & expr : reductions)
     stream << expr->process("#name_buf[lid1*" + local_size_0_ld_str + "+ lid0] = #name_acc;") << std::endl;
 
   stream << "#pragma unroll" << std::endl;
-  stream << "for(unsigned int stride = " << p_.local_size_0/2 << "; stride >0; stride /=2)" << std::endl;
+  stream << "for(" << _size_t << " stride = " << p_.local_size_0/2 << "; stride >0; stride /=2)" << std::endl;
   stream << "{" << std::endl;
   stream.inc_tab();
 
-  stream << "barrier(CLK_LOCAL_MEM_FENCE); " << std::endl;
+  stream << LocalBarrier(backend) << ";" << std::endl;
   stream << "if (lid0 < stride)" << std::endl;
   stream << "{" << std::endl;
   stream.inc_tab();
 
-  for (auto & e : exprs)
+  for (auto & e : reductions)
     if (e->is_index_reduction())
       compute_index_reduction(stream, e->process("#name_buf[lid1*" + local_size_0_ld_str + " + lid0]"), e->process("#name_buf[lid1*" + local_size_0_ld_str + " + lid0 + stride]")
                               , e->process("#name_buf_value[lid1*" + local_size_0_ld_str + " + lid0]"), e->process("#name_buf_value[lid1*" + local_size_0_ld_str + " + lid0 + stride]")
@@ -162,11 +186,11 @@ std::string mreduction::generate_impl(unsigned int label, expressions_tuple cons
   }
   else
   {
-    for (mapped_reduction const * e : exprs)
+    for (mapped_reduction const * e : reductions)
     {
       if (e->is_index_reduction())
-        stream << e->process("#name_temp_value[r + M*get_group_id(0)] = #name_buf_value[lid1*" + local_size_0_ld_str + "];") << std::endl;
-      stream << e->process("#name_temp[r + M*get_group_id(0)] = #name_buf[lid1*" + local_size_0_ld_str + "];") << std::endl;
+        stream << e->process("#name_temp_value[r + M*gpid0] = #name_buf_value[lid1*" + local_size_0_ld_str + "];") << std::endl;
+      stream << e->process("#name_temp[r + M*gpid0] = #name_buf[lid1*" + local_size_0_ld_str + "];") << std::endl;
     }
   }
   stream.dec_tab();
@@ -185,8 +209,10 @@ std::string mreduction::generate_impl(unsigned int label, expressions_tuple cons
   ////////////// Kernel 2
   ////////////////////////////////////////
 
-  stream << " __attribute__((reqd_work_group_size(" << p_.local_size_0 << "," << p_.local_size_1 << ",1)))" << std::endl;
-  stream << "__kernel void " << kprefix << "1(" << arguments << generate_arguments("#scalartype", mappings, expressions) << ")" << std::endl;
+  if(backend==driver::OPENCL)
+    stream << " __attribute__((reqd_work_group_size(" << p_.local_size_0 << "," << p_.local_size_1 << ",1)))" << std::endl;
+
+  stream << KernelPrefix(backend) << " void " << name[1] << "(" << arguments << generate_arguments("#scalartype", device, mappings, expressions) << ")" << std::endl;
   stream << "{" << std::endl;
   stream.inc_tab();
 
@@ -196,26 +222,32 @@ std::string mreduction::generate_impl(unsigned int label, expressions_tuple cons
                     {"array2", "#pointer += #start1 + #start2*#ld; "
                                "#ld *= #nldstride; "}}, expressions, mappings);
 
-  for (const auto & e : exprs)
-    stream << e->process("__local #scalartype #name_buf[" + to_string(p_.local_size_1*local_size_0_ld) + "];") << std::endl;
+  for (const auto & e : reductions)
+    stream << e->process(Local(backend).get() + " #scalartype #name_buf[" + to_string(p_.local_size_1*local_size_0_ld) + "];") << std::endl;
 
-  stream << "unsigned int lid0 = get_local_id(0);" << std::endl;
-  stream << "unsigned int lid1 = get_local_id(1);" << std::endl;
-  stream << "unsigned int upper_bound_1 = ( M +" << p_.local_size_1 - 1 << ")/" << p_.local_size_1 << "*" << p_.local_size_1 << ";" << std::endl;
-  stream << "for(unsigned int r = get_global_id(1); r < upper_bound_1; r += get_global_size(1)){" << std::endl;
+  stream << _size_t << " lid0 = " << LocalIdx0(backend) << ";" << std::endl;
+  stream << _size_t << " lsize0 = " << LocalSize0(backend) << ";" << std::endl;
+
+  stream << _size_t << " lid1 = " << LocalIdx1(backend) <<";" << std::endl;
+  stream << _size_t << " gid1 = " << GlobalIdx1(backend) <<";" << std::endl;
+  stream << _size_t << " gsize1 = " << GlobalSize1(backend) <<";" << std::endl;
+
+
+  stream << _size_t << " upper_bound_1 = ( M +" << p_.local_size_1 - 1 << ")/" << p_.local_size_1 << "*" << p_.local_size_1 << ";" << std::endl;
+  stream << "for(" << _size_t << " r = gid1; r < upper_bound_1; r += gsize1){" << std::endl;
   stream.inc_tab();
 
-  for (const auto & e : exprs)
+  for (const auto & e : reductions)
     stream << e->process("#scalartype #name_acc = " + neutral_element((e)->root_op()) + ";") << std::endl;
 
   stream << "if (r < M)" << std::endl;
   stream << "{" << std::endl;
   stream.inc_tab();
 
-  stream << "for(unsigned int c = get_local_id(0); c < " << p_.num_groups_0 << "; c += get_local_size(0)){" << std::endl;
+  stream << "for(" << _size_t << " c = lid0; c < " << p_.num_groups_0 << "; c += lsize0){" << std::endl;
   stream.inc_tab();
 
-  for (mapped_reduction* e: exprs)
+  for (mapped_reduction* e: reductions)
     compute_reduction(stream, e->process("#name_acc"), e->process("#name_temp[r + M*c]"), e->root_op());
 
   stream.dec_tab();
@@ -225,20 +257,20 @@ std::string mreduction::generate_impl(unsigned int label, expressions_tuple cons
   stream.dec_tab();
   stream << "}" << std::endl;
 
-  for (auto & expr : exprs)
+  for (auto & expr : reductions)
    stream << expr->process("#name_buf[lid1*" + local_size_0_ld_str + "+ lid0] = #name_acc;") << std::endl;
 
   stream << "#pragma unroll" << std::endl;
-  stream << "for(unsigned int stride = " << p_.local_size_0/2 << "; stride >0; stride /=2)" << std::endl;
+  stream << "for(" << _size_t << " stride = " << p_.local_size_0/2 << "; stride >0; stride /=2)" << std::endl;
   stream << "{" << std::endl;
   stream.inc_tab();
 
-  stream << "barrier(CLK_LOCAL_MEM_FENCE); " << std::endl;
+  stream << LocalBarrier(backend) << ";" << std::endl;
   stream << "if (lid0 < stride)" << std::endl;
   stream << "{" << std::endl;
   stream.inc_tab();
 
-  for (auto & e : exprs)
+  for (auto & e : reductions)
     if (e->is_index_reduction())
       compute_index_reduction(stream, e->process("#name_buf[lid1*" + local_size_0_ld_str + " + lid0]"), e->process("#name_buf[lid1*" + local_size_0_ld_str + " + lid0 + stride]")
                               , e->process("#name_buf_value[lid1*" + local_size_0_ld_str + " + lid0]"), e->process("#name_buf_value[lid1*" + local_size_0_ld_str + " + lid0 + stride]")
@@ -273,34 +305,9 @@ std::string mreduction::generate_impl(unsigned int label, expressions_tuple cons
     stream << "}" << std::endl;
   }
 
   // std::cout << stream.str() << std::endl;
   return stream.str();
 }
-
-std::vector<std::string> mreduction::generate_impl(unsigned int label, expressions_tuple const & expressions, std::vector<mapping_type> const & mappings) const
-{
-  std::vector<mapped_mreduction*> reductions;
-  expressions_tuple::data_type::const_iterator sit;
-  std::vector<mapping_type>::const_iterator mit;
-  for (mit = mappings.begin(), sit = expressions.data().begin(); mit != mappings.end(); ++mit, ++sit)
-  {
-    array_expression const & first_expression = *expressions.data().front();
-    std::vector<size_t> idx = filter_nodes(&is_reduction, first_expression, false);
-    for (auto & elem : idx)
-      reductions.push_back((mapped_mreduction*)(mit->at(mapping_key(elem, PARENT_NODE_TYPE)).get()));
-  }
-
-  std::vector<std::string> res;
-  if (reduction_type_ && p_.simd_width>1)
-  {
-    res.push_back(generate_impl(label, expressions, mappings, p_.simd_width, reductions));
-    res.push_back(generate_impl(label, expressions, mappings, 1, reductions));
-  }
-  else
-    res.push_back(generate_impl(label, expressions, mappings, 1, reductions));
-  return res;
-}
-
 mreduction::mreduction(mreduction::parameters_type const & parameters,
                        mreduction::reduction_type rtype,
                        binding_policy_t binding_policy) :
@@ -317,10 +324,10 @@ std::vector<int_t> mreduction::input_sizes(expressions_tuple const & expressions
   return tools::make_vector<int_t>() << MN.first << MN.second;
 }
 
-void mreduction::enqueue(cl::CommandQueue & queue, std::vector<cl_ext::lazy_compiler> & programs, unsigned int label, controller<expressions_tuple> const & controller)
+void mreduction::enqueue(driver::CommandQueue & queue, driver::Program & program, const char * suffix, base & fallback, controller<expressions_tuple> const & controller)
 {
   expressions_tuple const & expressions = controller.x();
-  cl::Context const & context = expressions.context();
+  driver::Context const & context = expressions.context();
 
   std::vector<int_t> MN = input_sizes(expressions);
   std::vector<array_expression::node const *> reductions;
@@ -331,35 +338,36 @@ void mreduction::enqueue(cl::CommandQueue & queue, std::vector<cl_ext::lazy_comp
       reductions.push_back(&(e)->tree()[r]);
   }
 
+  //Fallback
+  if(reduction_type_==REDUCE_COLUMNS && p_.simd_width>1 && requires_fallback(expressions))
+  {
+    fallback.enqueue(queue, program, "fallback", fallback, controller);
+    return;
+  }
 
   //Kernel
-  int idx = 0;
-  if(reduction_type_==REDUCE_COLUMNS && p_.simd_width>1 && requires_fallback(expressions))
-    idx = 1;
-  cl::Program & program = programs[idx].program();
-
-  std::vector< cl::Buffer > tmp;
-  std::vector< cl::Buffer > tmpidx;
+  std::vector< driver::Buffer > tmp;
+  std::vector< driver::Buffer > tmpidx;
   unsigned int dtype_size = size_of(lhs_most(expressions.data().front()->tree(), expressions.data().front()->root()).lhs.dtype);
 
-  char kname[2][10];
-  fill_kernel_name(kname[0], label, "d0");
-  fill_kernel_name(kname[1], label, "d1");
+  char name[2][32] = {{"prod"}, {"reduce"}};
+  strcat(name[0], suffix);
+  strcat(name[1], suffix);
 
   unsigned int nk = (p_.num_groups_0==1)?1:2;
 
-  std::vector<cl::Kernel> kernels;
+  std::vector<driver::Kernel> kernels;
   for(unsigned int k = 0 ; k < nk ; ++k)
-    kernels.push_back(cl::Kernel(program, kname[k]));
+    kernels.push_back(driver::Kernel(program, name[k]));
 
   for(unsigned int k = 0 ; k < nk ; ++k)
   {
-    cl::Kernel & kernel = kernels[k];
+    driver::Kernel & kernel = kernels[k];
     unsigned int n_arg = 0;
    int_t M = MN[0];
    int_t N = MN[1];
-    kernel.setArg(n_arg++, cl_uint(M));
-    kernel.setArg(n_arg++, cl_uint(N));
+    kernel.setSizeArg(n_arg++, M);
+    kernel.setSizeArg(n_arg++, N);
 
     //Temporary buffers
     unsigned int i = 0;
@@ -369,12 +377,12 @@ void mreduction::enqueue(cl::CommandQueue & queue, std::vector<cl_ext::lazy_comp
      if (is_index_reduction(r->op))
      {
        if (tmpidx.size() <= j)
-         tmpidx.push_back(cl::Buffer(context, CL_MEM_READ_WRITE, p_.num_groups_0*M*4));
+         tmpidx.push_back(driver::Buffer(context, p_.num_groups_0*M*4));
        kernel.setArg(n_arg++, tmpidx[j]);
        j++;
      }
      if (tmp.size() <= i)
-       tmp.push_back(cl::Buffer(context, CL_MEM_READ_WRITE, p_.num_groups_0*M*dtype_size));
+       tmp.push_back(driver::Buffer(context, p_.num_groups_0*M*dtype_size));
      kernel.setArg(n_arg++, tmp[i]);
      i++;
    }
@@ -382,10 +390,10 @@ void mreduction::enqueue(cl::CommandQueue & queue, std::vector<cl_ext::lazy_comp
  }
 
  //NDRange
- cl::NDRange global[2] = { cl::NDRange(p_.local_size_0*p_.num_groups_0, p_.local_size_1*p_.num_groups_1), cl::NDRange(p_.local_size_0, p_.local_size_1*p_.num_groups_1) };
- cl::NDRange local[2] = { cl::NDRange(p_.local_size_0, p_.local_size_1), cl::NDRange(p_.local_size_0, p_.local_size_1) };
+ driver::NDRange global[2] = { driver::NDRange(p_.local_size_0*p_.num_groups_0, p_.local_size_1*p_.num_groups_1), driver::NDRange(p_.local_size_0, p_.local_size_1*p_.num_groups_1) };
+ driver::NDRange local[2] = { driver::NDRange(p_.local_size_0, p_.local_size_1), driver::NDRange(p_.local_size_0, p_.local_size_1) };
  for(unsigned int i = 0 ; i < nk ; ++i)
-   controller.execution_options().enqueue_cache(queue, kernels[i], cl::NullRange, global[i], local[i]);
+   controller.execution_options().enqueue_cache(queue, kernels[i], global[i], local[i]);
 }
 
 mreduction_rows::mreduction_rows(mreduction_parameters const & parameters,
@@ -407,7 +415,5 @@ mreduction_cols::mreduction_cols(unsigned int simd, unsigned int ls1, unsigned i
   mreduction(mreduction_parameters(simd, ls1, ls2, ng1, ng2, fetch), REDUCE_COLUMNS, bind)
 {}
 
-template class base_impl<mreduction, mreduction_parameters>;
-
 
 }