More efficient access pattern in the GEMV kernel
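
The row- and column-wise reduction template (the kernels behind gemvN/gemvT) now accumulates along work-item dimension 0 instead of dimension 1: the loop over rows r moves from get_global_id(0) to get_global_id(1), the scratch buffer is indexed #name_buf[lid1*(local_size_0+1) + lid0] instead of #name_buf[lid0*(local_size_1+1) + lid1], the in-group tree reduction and the final write-out are guarded by lid0, and num_groups_1, which the old constructor hard-coded to 2, becomes an explicit, tunable parameter. The reduction axis is now split across num_groups_0 work-groups, and the clean-up kernel is built and enqueued only when num_groups_0 > 1. The sketch below is not part of the diff; it only isolates the index swap. The usual motivation, presumably the one behind the commit title, is that work-items adjacent in dimension 0 then touch adjacent addresses, which is the friendlier pattern for both global-memory coalescing and local-memory banking.

// Sketch (not part of the diff): the local-buffer index swap this commit makes.
// local_size_0/1 and lid0/lid1 mirror the generated OpenCL code; the +1 pad on the
// leading dimension is the bank-conflict padding already present in the template.
#include <cstdio>

// Old layout: rows of the scratch buffer follow lid0, the reduction runs over lid1.
inline unsigned old_index(unsigned lid0, unsigned lid1, unsigned local_size_1)
{ return lid0 * (local_size_1 + 1) + lid1; }

// New layout: rows follow lid1, the reduction runs over lid0. Work-items that differ
// only in lid0 (the fastest-varying id) now touch consecutive addresses.
inline unsigned new_index(unsigned lid0, unsigned lid1, unsigned local_size_0)
{ return lid1 * (local_size_0 + 1) + lid0; }

int main()
{
  const unsigned ls0 = 8, ls1 = 8;
  for(unsigned lid0 = 0; lid0 < 4; ++lid0)
    std::printf("lid0=%u  old index %u   new index %u\n",
                lid0, old_index(lid0, 0, ls1), new_index(lid0, 0, ls0));
  return 0;
}
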
@@ -236,44 +236,44 @@ cl::CommandQueue & queue = ad::cl_ext::queues[ad::cl_ext::default_context()][0];
// }
// std::cout << "\n\n" << std::flush;

// /*---------*/
// /*--BLAS2--*/
// /*---------*/
// //T-layout
// std::cout << "#GEMV-T" << std::endl;
// for(int_t i = 0 ; i < BLAS2_N.size() ; ++i)
// for(int_t j = 0 ; j < BLAS2_M.size() ; ++j)
// {
// int_t N = BLAS2_N[i];
// int_t M = BLAS2_M[j];
// std::cout << M << "," << N;
// /* ATIDLAS */
// ad::array A(N, M, dtype), y(M, dtype), x(N, dtype);
// y = dot(trans(A),x); queue.finish();
// BENCHMARK_ATIDLAS(y = ad::control(dot(trans(A),x), ad::execution_options_type(0, &events), ad::dispatcher_options_type(true)),(M*N + M + N)*dtsize/t);
// #ifdef BENCH_CLAMDBLAS
// BENCHMARK_CLAMDBLAS(clAmdBlasSgemv(clAmdBlasColumnMajor, clAmdBlasTrans, N, M, 1, A.data()(), A.ld(), x.data()(), 0, 1, 0, y.data()(), 0, 1, 1, &queue(),0, NULL, &event()), (M*N + M + N)*dtsize/t)
// #endif
// #ifdef BENCH_CBLAS
// std::vector<float> cA(N*M), cx(N), cy(M);
// ad::copy(x, cx);
// ad::copy(y, cy);
// ad::copy(A, cA);
// BENCHMARK_HOST(cblas_sgemv(CblasColMajor, CblasTrans, N, M, 1, cA.data(), N, cx.data(), 1, 0, cy.data(), 1), (M*N + M + N)*dtsize/t);
// #endif
// #ifdef BENCH_CUBLAS
// T *cuA, *cux, *cuy;
// cudaMalloc((void**) &cuA, N * M * sizeof(T));
// cudaMalloc((void**) &cux, N * sizeof(T));
// cudaMalloc((void**) &cuy, M * sizeof(T));
// BENCHMARK_CUDA(cublasSgemv(cublasTrans, N, M, 1, cuA, N, cux, 1, 0, cuy, 1), (M*N + M + N)*dtsize/t)
// cudaFree(cuA);
// cudaFree(cux);
// cudaFree(cuy);
// #endif
// std::cout << std::endl;
// }
// std::cout << "\n\n" << std::flush;
/*---------*/
/*--BLAS2--*/
/*---------*/
//T-layout
std::cout << "#GEMV-T" << std::endl;
for(int_t i = 0 ; i < BLAS2_N.size() ; ++i)
for(int_t j = 0 ; j < BLAS2_M.size() ; ++j)
{
int_t N = BLAS2_N[i];
int_t M = BLAS2_M[j];
std::cout << M << "," << N;
/* ATIDLAS */
ad::array A(N, M, dtype), y(M, dtype), x(N, dtype);
y = dot(trans(A),x); queue.finish();
BENCHMARK_ATIDLAS(y = ad::control(dot(trans(A),x), ad::execution_options_type(0, &events), ad::dispatcher_options_type(true)),(M*N + M + N)*dtsize/t);
#ifdef BENCH_CLAMDBLAS
BENCHMARK_CLAMDBLAS(clAmdBlasSgemv(clAmdBlasColumnMajor, clAmdBlasTrans, N, M, 1, A.data()(), A.ld(), x.data()(), 0, 1, 0, y.data()(), 0, 1, 1, &queue(),0, NULL, &event()), (M*N + M + N)*dtsize/t)
#endif
#ifdef BENCH_CBLAS
std::vector<float> cA(N*M), cx(N), cy(M);
ad::copy(x, cx);
ad::copy(y, cy);
ad::copy(A, cA);
BENCHMARK_HOST(cblas_sgemv(CblasColMajor, CblasTrans, N, M, 1, cA.data(), N, cx.data(), 1, 0, cy.data(), 1), (M*N + M + N)*dtsize/t);
#endif
#ifdef BENCH_CUBLAS
T *cuA, *cux, *cuy;
cudaMalloc((void**) &cuA, N * M * sizeof(T));
cudaMalloc((void**) &cux, N * sizeof(T));
cudaMalloc((void**) &cuy, M * sizeof(T));
BENCHMARK_CUDA(cublasSgemv(cublasTrans, N, M, 1, cuA, N, cux, 1, 0, cuy, 1), (M*N + M + N)*dtsize/t)
cudaFree(cuA);
cudaFree(cux);
cudaFree(cuy);
#endif
std::cout << std::endl;
}
std::cout << "\n\n" << std::flush;

/*---------*/
/*--BLAS3--*/
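
Each BENCHMARK_* call in the re-enabled GEMV-T block above reports (M*N + M + N)*dtsize/t, i.e. an effective bandwidth: for y = Aᵀx with A of size N x M the kernel reads M*N + N elements and writes M, so that many elements times dtsize bytes move in t seconds. dtsize and t come from the benchmark harness and are not shown here; the snippet below is only a sketch of the metric with made-up numbers.

// Sketch: the bandwidth figure printed by the GEMV benchmarks.
#include <cstdio>

double effective_bandwidth(long M, long N, long dtsize, double t_seconds)
{
  // bytes moved (read A and x, write y) per second
  return double(M*N + M + N) * double(dtsize) / t_seconds;
}

int main()
{
  // Hypothetical: a 4096 x 4096 single-precision transposed GEMV taking 1 ms.
  std::printf("%.2f GB/s\n", effective_bandwidth(4096, 4096, 4, 1e-3) * 1e-9);
  return 0;
}
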
@@ -12,10 +12,10 @@ __global__ void dummy(){}

int main()
{
for(const auto & elem : ad::cl_ext::queues.data())
for(ad::cl_ext::queues_type::data_type::const_iterator it = ad::cl_ext::queues.data().begin() ; it != ad::cl_ext::queues.data().end() ; ++it)
{
cl::CommandQueue queue = elem.second[0];
cl::Context context = elem.first;
cl::CommandQueue queue = it->second[0];
cl::Context context = it->first;
cl::Device device = queue.getInfo<CL_QUEUE_DEVICE>();
cl::Program program(context,"__kernel void dummy(){}");
program.build();

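Both spellings of the loop in this hunk walk the same cache of one queue list per OpenCL context; the iterator form simply avoids C++11 range-for. A self-contained stand-in is sketched below — std::map and std::string replace the real cl::Context / cl::CommandQueue types, which is an assumption of the sketch, not the library's layout.

// Sketch: iterating a (context -> queues) cache with plain iterators.
#include <map>
#include <vector>
#include <string>
#include <cstdio>

int main()
{
  typedef std::string Context;       // stand-in for cl::Context
  typedef std::string CommandQueue;  // stand-in for cl::CommandQueue
  std::map<Context, std::vector<CommandQueue> > queues;
  queues["ctx0"].push_back("queue0");

  for(std::map<Context, std::vector<CommandQueue> >::const_iterator it = queues.begin();
      it != queues.end(); ++it)
  {
    Context context = it->first;
    CommandQueue queue = it->second[0];   // first queue of this context
    std::printf("%s -> %s\n", context.c_str(), queue.c_str());
  }
  return 0;
}
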
@@ -13,7 +13,7 @@ struct mreduction_parameters : public base::parameters_type
{
mreduction_parameters(unsigned int _simd_width,
unsigned int _local_size_0, unsigned int _local_size_1,
unsigned int _num_groups_0, fetching_policy_type _fetch_policy);
unsigned int _num_groups_0, unsigned int _num_groups_1, fetching_policy_type _fetch_policy);
unsigned int num_groups_0;
unsigned int num_groups_1;
fetching_policy_type fetch_policy;

@@ -45,14 +45,14 @@ class mreduction_rows : public mreduction
{
public:
mreduction_rows(mreduction::parameters_type const &, binding_policy_t binding_policy = BIND_ALL_UNIQUE);
mreduction_rows(unsigned int simd, unsigned int ls1, unsigned int ls2, unsigned int ng, fetching_policy_type fetch, binding_policy_t bind = BIND_ALL_UNIQUE);
mreduction_rows(unsigned int simd, unsigned int ls1, unsigned int ls2, unsigned int ng1, unsigned int ng2, fetching_policy_type fetch, binding_policy_t bind = BIND_ALL_UNIQUE);
};

class mreduction_cols : public mreduction
{
public:
mreduction_cols(mreduction::parameters_type const &, binding_policy_t binding_policy = BIND_ALL_UNIQUE);
mreduction_cols(unsigned int simd, unsigned int ls1, unsigned int ls2, unsigned int ng, fetching_policy_type fetch, binding_policy_t bind = BIND_ALL_UNIQUE);
mreduction_cols(unsigned int simd, unsigned int ls1, unsigned int ls2, unsigned int ng1, unsigned int ng2, fetching_policy_type fetch, binding_policy_t bind = BIND_ALL_UNIQUE);
};

}

@@ -10,8 +10,8 @@ namespace atidlas

mreduction_parameters::mreduction_parameters(unsigned int _simd_width,
unsigned int _local_size_0, unsigned int _local_size_1,
unsigned int _num_groups_0, fetching_policy_type _fetch_policy): base::parameters_type(_simd_width, _local_size_0, _local_size_1, 1),
num_groups_0(_num_groups_0), num_groups_1(2), fetch_policy(_fetch_policy) { }
unsigned int _num_groups_0, unsigned int _num_groups_1, fetching_policy_type _fetch_policy): base::parameters_type(_simd_width, _local_size_0, _local_size_1, 1),
num_groups_0(_num_groups_0), num_groups_1(_num_groups_1), fetch_policy(_fetch_policy) { }


int mreduction::check_invalid_impl(cl::Device const &, expressions_tuple const &) const
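
The old constructor fixed num_groups_1 at 2; after this hunk both group counts are constructor arguments, so the launch grid can be tuned in both dimensions like the other parameters. A simplified stand-in (not the real mreduction_parameters) is sketched below, using the new defaults from init_models further down.

// Sketch: the parameter change, with num_groups_1 now an explicit knob.
#include <cstdio>

struct gemv_params_sketch
{
  gemv_params_sketch(unsigned simd, unsigned ls0, unsigned ls1, unsigned ng0, unsigned ng1)
    : simd_width(simd), local_size_0(ls0), local_size_1(ls1),
      num_groups_0(ng0), num_groups_1(ng1) {}   // previously: num_groups_1(2)
  unsigned simd_width, local_size_0, local_size_1, num_groups_0, num_groups_1;
};

int main()
{
  gemv_params_sketch p(1, 8, 8, 4, 16);   // values from the new mreduction_rows default
  std::printf("grid = %u x %u work-groups of %u x %u\n",
              p.num_groups_0, p.num_groups_1, p.local_size_0, p.local_size_1);
  return 0;
}
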
@@ -29,8 +29,7 @@ unsigned int mreduction::lmem_usage() const
std::string mreduction::generate_impl(unsigned int label, expressions_tuple const & expressions, std::vector<mapping_type> const & mappings, unsigned int simd_width, std::vector<mapped_mreduction*> const & exprs) const
{
using tools::to_string;
unsigned int local_size_1_ld = p_.local_size_1+1;
std::string local_size_1_ld_str = to_string(local_size_1_ld);

kernel_generation_stream stream;

@@ -61,13 +60,16 @@ std::string mreduction::generate_impl(unsigned int label, expressions_tuple cons
{"array2", "#pointer += #start1 + #start2*#ld; "
"#ld *= #nldstride; "}}, expressions, mappings);

unsigned int local_size_0_ld = p_.local_size_0+1;
std::string local_size_0_ld_str = to_string(local_size_0_ld);

for (const auto & e : exprs)
stream << e->process("__local #scalartype #name_buf[" + to_string(p_.local_size_0*local_size_1_ld) + "];") << std::endl;
stream << e->process("__local #scalartype #name_buf[" + to_string(p_.local_size_1*local_size_0_ld) + "];") << std::endl;

stream << "unsigned int lid0 = get_local_id(0);" << std::endl;
stream << "unsigned int lid1 = get_local_id(1);" << std::endl;
stream << "unsigned int upper_bound_0 = ( M +" << p_.local_size_0 - 1 << ")/" << p_.local_size_0 << "*" << p_.local_size_0 << ";" << std::endl;
stream << "for(unsigned int r = get_global_id(0); r < upper_bound_0; r += get_global_size(0)){" << std::endl;
stream << "unsigned int upper_bound_1 = ( M +" << p_.local_size_1 - 1 << ")/" << p_.local_size_1 << "*" << p_.local_size_1 << ";" << std::endl;
stream << "for(unsigned int r = get_global_id(1); r < upper_bound_1; r += get_global_size(1)){" << std::endl;
stream.inc_tab();

for (const auto & e : exprs)
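
The scratch buffer keeps its one-element pad on the leading dimension (local_size_0+1 now, local_size_1+1 before); only the layout is transposed to match the new indexing. Such padding is the conventional trick for keeping a column walk of a power-of-two-wide local array off a single memory bank; whether that walk actually occurs here depends on scheduling details the diff doesn't show, so the snippet below only illustrates the arithmetic, and the 32-bank layout is an assumption.

// Sketch: which local-memory banks a column walk hits, with and without the +1 pad.
#include <cstdio>

int main()
{
  const unsigned local_size_0 = 8, banks = 32;
  for(unsigned row_stride = local_size_0; row_stride <= local_size_0 + 1; ++row_stride)
  {
    std::printf("row stride %u: banks hit ->", row_stride);
    for(unsigned row = 0; row < local_size_0; ++row)
      std::printf(" %u", (row * row_stride) % banks);   // bank of element (row, 0)
    std::printf("\n");
  }
  return 0;
}
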
@@ -77,7 +79,7 @@ std::string mreduction::generate_impl(unsigned int label, expressions_tuple cons
stream << "{" << std::endl;
stream.inc_tab();

element_wise_loop_1D(stream, p_.fetch_policy, simd_width, "c", "N", "get_global_id(1)", "get_global_size(1)", [&](unsigned int simd_width)
element_wise_loop_1D(stream, p_.fetch_policy, simd_width, "c", "N", "get_global_id(0)", "get_global_size(0)", [&](unsigned int simd_width)
{
std::string data_type = append_width("#scalartype",simd_width);

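With the strided fetching policy, element_wise_loop_1D emits a grid-strided loop over the reduction index c; after this hunk the index and stride come from dimension 0 rather than dimension 1, so work-items that are neighbours in the fastest-varying id read adjacent elements of the matrix on every trip through the loop. The exact code emitted depends on the fetch policy and simd width; the host-side model below only shows the visiting pattern, with a made-up launch width.

// Sketch: which reduction indices neighbouring work-items visit in a grid-strided loop.
#include <cstdio>

int main()
{
  const unsigned N = 32;              // length of the reduction axis
  const unsigned global_size_0 = 8;   // hypothetical launch width in dimension 0
  for(unsigned gid0 = 0; gid0 < 3; ++gid0)
  {
    std::printf("work-item gid0=%u visits c =", gid0);
    for(unsigned c = gid0; c < N; c += global_size_0)   // same shape as the generated loop
      std::printf(" %u", c);
    std::printf("\n");
  }
  return 0;
}
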
@@ -121,25 +123,25 @@ std::string mreduction::generate_impl(unsigned int label, expressions_tuple cons
stream << "}" << std::endl;

for (auto & expr : exprs)
stream << expr->process("#name_buf[lid0*" + local_size_1_ld_str + "+ lid1] = #name_acc;") << std::endl;
stream << expr->process("#name_buf[lid1*" + local_size_0_ld_str + "+ lid0] = #name_acc;") << std::endl;

stream << "#pragma unroll" << std::endl;
stream << "for(unsigned int stride = " << p_.local_size_1/2 << "; stride >0; stride /=2)" << std::endl;
stream << "for(unsigned int stride = " << p_.local_size_0/2 << "; stride >0; stride /=2)" << std::endl;
stream << "{" << std::endl;
stream.inc_tab();

stream << "barrier(CLK_LOCAL_MEM_FENCE); " << std::endl;
stream << "if (lid1 < stride)" << std::endl;
stream << "if (lid0 < stride)" << std::endl;
stream << "{" << std::endl;
stream.inc_tab();

for (auto & e : exprs)
if (e->is_index_reduction())
compute_index_reduction(stream, e->process("#name_buf[lid0*" + local_size_1_ld_str + " + lid1]"), e->process("#name_buf[lid0*" + local_size_1_ld_str + " + lid1 + stride]")
, e->process("#name_buf_value[lid0*" + local_size_1_ld_str + " + lid1]"), e->process("#name_buf_value[lid0*" + local_size_1_ld_str + " + lid1 + stride]")
compute_index_reduction(stream, e->process("#name_buf[lid1*" + local_size_0_ld_str + " + lid0]"), e->process("#name_buf[lid1*" + local_size_0_ld_str + " + lid0 + stride]")
, e->process("#name_buf_value[lid1*" + local_size_0_ld_str + " + lid0]"), e->process("#name_buf_value[lid1*" + local_size_0_ld_str + " + lid0 + stride]")
, e->root_op());
else
compute_reduction(stream,e->process("#name_buf[lid0*" + local_size_1_ld_str + " + lid1]"), e->process("#name_buf[lid0*" + local_size_1_ld_str + " + lid1 + stride]"), e->root_op());
compute_reduction(stream,e->process("#name_buf[lid1*" + local_size_0_ld_str + " + lid0]"), e->process("#name_buf[lid1*" + local_size_0_ld_str + " + lid0 + stride]"), e->root_op());

stream.dec_tab();
stream << "}" << std::endl;
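
The in-group tree reduction now halves a stride that starts at local_size_0/2 and folds along lid0, so the partial result for each row lands at lid0 == 0. What compute_reduction emits depends on the root operation of the expression; the host-side model below uses a plain sum as a stand-in.

// Sketch: host-side model of the tree reduction emitted above.
#include <cstdio>

int main()
{
  const unsigned local_size_0 = 8;
  double buf[local_size_0];
  for(unsigned lid0 = 0; lid0 < local_size_0; ++lid0)
    buf[lid0] = 1.0 + lid0;                           // pretend per-work-item partial sums

  for(unsigned stride = local_size_0/2; stride > 0; stride /= 2)
    for(unsigned lid0 = 0; lid0 < stride; ++lid0)     // "if (lid0 < stride)" in the kernel
      buf[lid0] += buf[lid0 + stride];                // compute_reduction, here a plain sum

  std::printf("reduced value at lid0==0: %g (expected %g)\n", buf[0], 8.0*9.0/2.0);
  return 0;
}
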
@@ -148,15 +150,24 @@ std::string mreduction::generate_impl(unsigned int label, expressions_tuple cons
stream << "}" << std::endl;


stream << "if (lid1 == 0 && r < M)";
stream << "if (lid0 == 0 && r < M)";
stream << "{" << std::endl;
stream.inc_tab();

if(p_.num_groups_0==1)
{
std::map<std::string, std::string> accessors;
accessors["mreduction"] = "#name_buf[lid1*" + local_size_0_ld_str + "]";
accessors["array1"] = "#pointer[r*#stride]";
evaluate(stream, PARENT_NODE_TYPE, accessors, expressions, mappings);
}
else
{
for (mapped_reduction const * e : exprs)
{
if (e->is_index_reduction())
stream << e->process("#name_temp_value[r + M*get_group_id(1)] = #name_buf_value[lid0*" + local_size_1_ld_str + "];") << std::endl;
stream << e->process("#name_temp[r + M*get_group_id(1)] = #name_buf[lid0*" + local_size_1_ld_str + "];") << std::endl;
stream << e->process("#name_temp_value[r + M*get_group_id(0)] = #name_buf_value[lid1*" + local_size_0_ld_str + "];") << std::endl;
stream << e->process("#name_temp[r + M*get_group_id(0)] = #name_buf[lid1*" + local_size_0_ld_str + "];") << std::endl;
}
}
stream.dec_tab();
stream << "}" << std::endl;
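
After this hunk the write-out has two cases: when a single work-group covers the reduction axis (num_groups_0 == 1) the row result is evaluated and stored directly, otherwise each group g along that axis writes its partial value at #name_temp[r + M*g] and a second kernel finishes the reduction. The host-side model below shows that temporary layout; the plain sum and the sizes are stand-ins for the example.

// Sketch: the two-stage layout used when num_groups_0 > 1.
#include <vector>
#include <cstdio>

int main()
{
  const unsigned M = 4, num_groups_0 = 3;
  std::vector<double> temp(M * num_groups_0, 1.0);   // stage 1: one partial per (row, group)

  std::vector<double> y(M, 0.0);                     // stage 2: fold the partials per row
  for(unsigned r = 0; r < M; ++r)
    for(unsigned g = 0; g < num_groups_0; ++g)
      y[r] += temp[r + M*g];

  std::printf("y[0] = %g (one contribution per group, so %u)\n", y[0], num_groups_0);
  return 0;
}
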
@@ -168,6 +179,8 @@ std::string mreduction::generate_impl(unsigned int label, expressions_tuple cons
stream.dec_tab();
stream << "}" << std::endl;

if(p_.num_groups_0>1)
{
/////////////////////////////////////////
////////////// Kernel 2
////////////////////////////////////////
@@ -184,12 +197,12 @@ std::string mreduction::generate_impl(unsigned int label, expressions_tuple cons
"#ld *= #nldstride; "}}, expressions, mappings);

for (const auto & e : exprs)
stream << e->process("__local #scalartype #name_buf[" + to_string(p_.local_size_0*local_size_1_ld) + "];") << std::endl;
stream << e->process("__local #scalartype #name_buf[" + to_string(p_.local_size_1*local_size_0_ld) + "];") << std::endl;

stream << "unsigned int lid0 = get_local_id(0);" << std::endl;
stream << "unsigned int lid1 = get_local_id(1);" << std::endl;
stream << "unsigned int upper_bound_0 = ( M +" << p_.local_size_0 - 1 << ")/" << p_.local_size_0 << "*" << p_.local_size_0 << ";" << std::endl;
stream << "for(unsigned int r = get_global_id(0); r < upper_bound_0; r += get_global_size(0)){" << std::endl;
stream << "unsigned int upper_bound_1 = ( M +" << p_.local_size_1 - 1 << ")/" << p_.local_size_1 << "*" << p_.local_size_1 << ";" << std::endl;
stream << "for(unsigned int r = get_global_id(1); r < upper_bound_1; r += get_global_size(1)){" << std::endl;
stream.inc_tab();

for (const auto & e : exprs)
@@ -213,25 +226,25 @@ std::string mreduction::generate_impl(unsigned int label, expressions_tuple cons
stream << "}" << std::endl;

for (auto & expr : exprs)
stream << expr->process("#name_buf[lid0*" + local_size_1_ld_str + "+ lid1] = #name_acc;") << std::endl;
stream << expr->process("#name_buf[lid1*" + local_size_0_ld_str + "+ lid0] = #name_acc;") << std::endl;

stream << "#pragma unroll" << std::endl;
stream << "for(unsigned int stride = " << p_.local_size_1/2 << "; stride >0; stride /=2)" << std::endl;
stream << "for(unsigned int stride = " << p_.local_size_0/2 << "; stride >0; stride /=2)" << std::endl;
stream << "{" << std::endl;
stream.inc_tab();

stream << "barrier(CLK_LOCAL_MEM_FENCE); " << std::endl;
stream << "if (lid1 < stride)" << std::endl;
stream << "if (lid0 < stride)" << std::endl;
stream << "{" << std::endl;
stream.inc_tab();

for (auto & e : exprs)
if (e->is_index_reduction())
compute_index_reduction(stream, e->process("#name_buf[lid0*" + local_size_1_ld_str + " + lid1]"), e->process("#name_buf[lid0*" + local_size_1_ld_str + " + lid1 + stride]")
, e->process("#name_buf_value[lid0*" + local_size_1_ld_str + " + lid1]"), e->process("#name_buf_value[lid0*" + local_size_1_ld_str + " + lid1 + stride]")
compute_index_reduction(stream, e->process("#name_buf[lid1*" + local_size_0_ld_str + " + lid0]"), e->process("#name_buf[lid1*" + local_size_0_ld_str + " + lid0 + stride]")
, e->process("#name_buf_value[lid1*" + local_size_0_ld_str + " + lid0]"), e->process("#name_buf_value[lid1*" + local_size_0_ld_str + " + lid0 + stride]")
, e->root_op());
else
compute_reduction(stream,e->process("#name_buf[lid0*" + local_size_1_ld_str + " + lid1]"), e->process("#name_buf[lid0*" + local_size_1_ld_str + " + lid1 + stride]"), e->root_op());
compute_reduction(stream,e->process("#name_buf[lid1*" + local_size_0_ld_str + " + lid0]"), e->process("#name_buf[lid1*" + local_size_0_ld_str + " + lid0 + stride]"), e->root_op());

stream.dec_tab();
stream << "}" << std::endl;
@@ -240,12 +253,12 @@ std::string mreduction::generate_impl(unsigned int label, expressions_tuple cons
stream << "}" << std::endl;


stream << "if (lid1 == 0 && r < M)";
stream << "if (lid0 == 0 && r < M)";
stream << "{" << std::endl;
stream.inc_tab();

std::map<std::string, std::string> accessors;
accessors["mreduction"] = "#name_buf[lid0*" + local_size_1_ld_str + "]";
accessors["mreduction"] = "#name_buf[lid1*" + local_size_0_ld_str + "]";
accessors["array1"] = "#pointer[r*#stride]";
evaluate(stream, PARENT_NODE_TYPE, accessors, expressions, mappings);

@@ -258,7 +271,7 @@ std::string mreduction::generate_impl(unsigned int label, expressions_tuple cons

stream.dec_tab();
stream << "}" << std::endl;

}

// std::cout << stream.str() << std::endl;
return stream.str();
@@ -309,11 +322,6 @@ void mreduction::enqueue(cl::CommandQueue & queue, std::vector<cl_ext::lazy_comp
expressions_tuple const & expressions = controller.x();
cl::Context const & context = expressions.context();

char k0[10];
char k1[10];
fill_kernel_name(k0, label, "d0");
fill_kernel_name(k1, label, "d1");

std::vector<int_t> MN = input_sizes(expressions);
std::vector<array_expression::node const *> reductions;
for (const auto & e : expressions.data())
@@ -330,21 +338,28 @@ void mreduction::enqueue(cl::CommandQueue & queue, std::vector<cl_ext::lazy_comp
idx = 1;
cl::Program & program = programs[idx].program();

//NDRange
cl::Kernel kernels[2] = { cl::Kernel(program, k0), cl::Kernel(program, k1)};
cl::NDRange global[2] = { cl::NDRange(p_.local_size_0*p_.num_groups_0, p_.local_size_1*p_.num_groups_1), cl::NDRange(p_.local_size_0*p_.num_groups_0, p_.local_size_1) };
cl::NDRange local[2] = { cl::NDRange(p_.local_size_0, p_.local_size_1), cl::NDRange(p_.local_size_0, p_.local_size_1) };

std::vector< cl::Buffer > tmp;
std::vector< cl::Buffer > tmpidx;
unsigned int dtype_size = size_of(lhs_most(expressions.data().front()->tree(), expressions.data().front()->root()).lhs.dtype);
for (auto & k : kernels)

char kname[2][10];
fill_kernel_name(kname[0], label, "d0");
fill_kernel_name(kname[1], label, "d1");

unsigned int nk = (p_.num_groups_0==1)?1:2;

std::vector<cl::Kernel> kernels;
for(unsigned int k = 0 ; k < nk ; ++k)
kernels.push_back(cl::Kernel(program, kname[k]));

for(unsigned int k = 0 ; k < nk ; ++k)
{
cl::Kernel & kernel = kernels[k];
unsigned int n_arg = 0;
int_t M = MN[0];
int_t N = MN[1];
k.setArg(n_arg++, cl_uint(M));
k.setArg(n_arg++, cl_uint(N));
kernel.setArg(n_arg++, cl_uint(M));
kernel.setArg(n_arg++, cl_uint(N));

//Temporary buffers
unsigned int i = 0;
@@ -354,19 +369,22 @@ void mreduction::enqueue(cl::CommandQueue & queue, std::vector<cl_ext::lazy_comp
if (is_index_reduction(r->op))
{
if (tmpidx.size() <= j)
tmpidx.push_back(cl::Buffer(context, CL_MEM_READ_WRITE, p_.num_groups_1*M*4));
k.setArg(n_arg++, tmpidx[j]);
tmpidx.push_back(cl::Buffer(context, CL_MEM_READ_WRITE, p_.num_groups_0*M*4));
kernel.setArg(n_arg++, tmpidx[j]);
j++;
}
if (tmp.size() <= i)
tmp.push_back(cl::Buffer(context, CL_MEM_READ_WRITE, p_.num_groups_1*M*dtype_size));
k.setArg(n_arg++, tmp[i]);
tmp.push_back(cl::Buffer(context, CL_MEM_READ_WRITE, p_.num_groups_0*M*dtype_size));
kernel.setArg(n_arg++, tmp[i]);
i++;
}
set_arguments(expressions, k, n_arg);
set_arguments(expressions, kernel, n_arg);
}

for(unsigned int i = 0 ; i < 2 ; ++i)
//NDRange
cl::NDRange global[2] = { cl::NDRange(p_.local_size_0*p_.num_groups_0, p_.local_size_1*p_.num_groups_1), cl::NDRange(p_.local_size_0, p_.local_size_1*p_.num_groups_1) };
cl::NDRange local[2] = { cl::NDRange(p_.local_size_0, p_.local_size_1), cl::NDRange(p_.local_size_0, p_.local_size_1) };
for(unsigned int i = 0 ; i < nk ; ++i)
controller.execution_options().enqueue_cache(queue, kernels[i], cl::NullRange, global[i], local[i]);
}
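
On the enqueue side, the number of kernels is now nk = 1 when num_groups_0 == 1 and 2 otherwise, the temporaries hold num_groups_0 partials per row instead of num_groups_1, and the second kernel keeps a single work-group in dimension 0. The sketch below writes that launch configuration out as plain host code; the parameter values are the new mreduction_rows defaults, used here only as an example.

// Sketch: launch configuration after this change.
#include <cstdio>

struct range2 { unsigned g0, g1; };

int main()
{
  const unsigned local_size_0 = 8, local_size_1 = 8, num_groups_0 = 4, num_groups_1 = 16;
  const unsigned nk = (num_groups_0 == 1) ? 1 : 2;   // second kernel only if the axis is split

  range2 global[2] = { { local_size_0*num_groups_0, local_size_1*num_groups_1 },
                       { local_size_0,              local_size_1*num_groups_1 } };

  for(unsigned k = 0; k < nk; ++k)
    std::printf("kernel %u: global %u x %u, local %u x %u\n",
                k, global[k].g0, global[k].g1, local_size_0, local_size_1);
  return 0;
}
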
@@ -375,8 +393,8 @@ mreduction_rows::mreduction_rows(mreduction_parameters const & parameters,
mreduction(parameters, REDUCE_ROWS, binding_policy){}

mreduction_rows::mreduction_rows(unsigned int simd, unsigned int ls1, unsigned int ls2,
unsigned int ng, fetching_policy_type fetch, binding_policy_t bind):
mreduction(mreduction_parameters(simd, ls1, ls2, ng, fetch), REDUCE_ROWS, bind)
unsigned int ng1, unsigned int ng2, fetching_policy_type fetch, binding_policy_t bind):
mreduction(mreduction_parameters(simd, ls1, ls2, ng1, ng2, fetch), REDUCE_ROWS, bind)
{}

@@ -385,8 +403,8 @@ mreduction_cols::mreduction_cols(mreduction::parameters_type const & parameters
mreduction(parameters, REDUCE_COLUMNS, binding_policy){}

mreduction_cols::mreduction_cols(unsigned int simd, unsigned int ls1, unsigned int ls2,
unsigned int ng, fetching_policy_type fetch, binding_policy_t bind):
mreduction(mreduction_parameters(simd, ls1, ls2, ng, fetch), REDUCE_COLUMNS, bind)
unsigned int ng1, unsigned int ng2, fetching_policy_type fetch, binding_policy_t bind):
mreduction(mreduction_parameters(simd, ls1, ls2, ng1, ng2, fetch), REDUCE_COLUMNS, bind)
{}

template class base_impl<mreduction, mreduction_parameters>;
@@ -171,9 +171,9 @@ namespace detail
else if(template_name=="maxpy")
return tools::shared_ptr<base>(new maxpy(a[0], a[1], a[2], a[3], a[4], fetch[a[5]]));
else if(template_name.find("gemvN")!=std::string::npos)
return tools::shared_ptr<base>(new mreduction_rows(a[0], a[1], a[2], a[3], fetch[a[4]]));
return tools::shared_ptr<base>(new mreduction_rows(a[0], a[1], a[2], a[3], a[4], fetch[a[5]]));
else if(template_name.find("gemvT")!=std::string::npos)
return tools::shared_ptr<base>(new mreduction_cols(a[0], a[1], a[2], a[3], fetch[a[4]]));
return tools::shared_ptr<base>(new mreduction_cols(a[0], a[1], a[2], a[3], a[4], fetch[a[5]]));
else if(template_name.find("gemmNN")!=std::string::npos)
return tools::shared_ptr<base>(new mproduct_nn(a[0], a[1], a[2], a[3], a[4], a[5], a[6], fetch[a[7]], fetch[a[8]], a[9], a[10]));
else if(template_name.find("gemmTN")!=std::string::npos)
@@ -247,8 +247,8 @@ model_map_t init_models(cl::CommandQueue & queue)
res[std::make_pair(VECTOR_AXPY_TYPE, DTYPE)] = ptr_t (new model(vaxpy(1,64,128,FETCH_FROM_GLOBAL_STRIDED), queue));
res[std::make_pair(REDUCTION_TYPE, DTYPE)] = ptr_t(new model(reduction(1,64,128,FETCH_FROM_GLOBAL_STRIDED), queue));
res[std::make_pair(MATRIX_AXPY_TYPE, DTYPE)] = ptr_t(new model(maxpy(1,8,8,8,8,FETCH_FROM_GLOBAL_STRIDED), queue));
res[std::make_pair(ROW_WISE_REDUCTION_TYPE, DTYPE)] = ptr_t(new model(mreduction_rows(1, 8, 8, 16, FETCH_FROM_GLOBAL_STRIDED), queue));
res[std::make_pair(COL_WISE_REDUCTION_TYPE, DTYPE)] = ptr_t(new model(mreduction_cols(1, 8, 16, 128, FETCH_FROM_GLOBAL_STRIDED), queue));
res[std::make_pair(ROW_WISE_REDUCTION_TYPE, DTYPE)] = ptr_t(new model(mreduction_rows(1, 8, 8, 4, 16, FETCH_FROM_GLOBAL_STRIDED), queue));
res[std::make_pair(COL_WISE_REDUCTION_TYPE, DTYPE)] = ptr_t(new model(mreduction_cols(1, 8, 8, 64, 8, FETCH_FROM_GLOBAL_STRIDED), queue));
res[std::make_pair(MATRIX_PRODUCT_NN_TYPE, DTYPE)] = ptr_t(new model(mproduct_nn(1, 8, 8, 8, 4, 1, 4, FETCH_FROM_LOCAL, FETCH_FROM_LOCAL, 8, 8), queue));
res[std::make_pair(MATRIX_PRODUCT_TN_TYPE, DTYPE)] = ptr_t(new model(mproduct_tn(1, 8, 8, 8, 4, 1, 4, FETCH_FROM_LOCAL, FETCH_FROM_LOCAL, 8, 8), queue));
res[std::make_pair(MATRIX_PRODUCT_NT_TYPE, DTYPE)] = ptr_t(new model(mproduct_nt(1, 8, 8, 8, 4, 1, 4, FETCH_FROM_LOCAL, FETCH_FROM_LOCAL, 8, 8), queue));
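
Downstream of the signature change, every place that builds a GEMV template from a flat parameter list must now pass five integers plus the fetching policy: the detail::create hunk above reads a[0]..a[4] and fetch[a[5]], the default models change to (1, 8, 8, 4, 16) and (1, 8, 8, 64, 8), and the autotuner and binding hunks that follow gain one slot each. The sketch below only mimics that plumbing with stand-in types; the real classes live in atidlas.

// Sketch: decoding a flat profile into the new five-integer GEMV constructor.
#include <cstdio>

struct fetching_policy_stub { int value; };   // stand-in for atidlas::fetching_policy_type

struct mreduction_rows_sketch                 // stand-in for atidlas::mreduction_rows
{
  mreduction_rows_sketch(unsigned simd, unsigned ls1, unsigned ls2,
                         unsigned ng1, unsigned ng2, fetching_policy_stub fetch)
  { std::printf("simd=%u ls=%ux%u groups=%ux%u fetch=%d\n",
                simd, ls1, ls2, ng1, ng2, fetch.value); }
};

int main()
{
  unsigned a[] = {1, 8, 8, 4, 16};            // plays the role of the decoded profile
  fetching_policy_stub fetch = { 0 };
  mreduction_rows_sketch rows(a[0], a[1], a[2], a[3], a[4], fetch);
  (void)rows;
  return 0;
}
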
@@ -42,8 +42,8 @@ class GeneticOperators(object):
atd.vaxpy: [3,4,4,atd.fetching_policy_type],
atd.reduction: [3,4,4,atd.fetching_policy_type],
atd.maxpy: [3,3,3,3,3,atd.fetching_policy_type],
atd.mreduction_rows: [3,3,3,4,atd.fetching_policy_type],
atd.mreduction_cols: [3,3,3,4,atd.fetching_policy_type],
atd.mreduction_rows: [3,3,3,3,3,atd.fetching_policy_type],
atd.mreduction_cols: [3,3,3,3,3,atd.fetching_policy_type],
atd.mproduct_nn: [3,3,3,3,3,3,3,atd.fetching_policy_type,atd.fetching_policy_type,3],
atd.mproduct_nt: [3,3,3,3,3,3,3,atd.fetching_policy_type,atd.fetching_policy_type,3],
atd.mproduct_tn: [3,3,3,3,3,3,3,atd.fetching_policy_type,atd.fetching_policy_type,3],
@@ -674,8 +674,8 @@ void export_model()
WRAP_SINGLE_TEMPLATE(maxpy, uint, uint, uint, uint, uint, atidlas::fetching_policy_type)
WRAP_SINGLE_TEMPLATE(reduction, uint, uint, uint, atidlas::fetching_policy_type)
WRAP_BASE(mreduction)
WRAP_TEMPLATE(mreduction_rows, mreduction, uint, uint, uint, uint, atidlas::fetching_policy_type)
WRAP_TEMPLATE(mreduction_cols, mreduction, uint, uint, uint, uint, atidlas::fetching_policy_type)
WRAP_TEMPLATE(mreduction_rows, mreduction, uint, uint, uint, uint, uint, atidlas::fetching_policy_type)
WRAP_TEMPLATE(mreduction_cols, mreduction, uint, uint, uint, uint, uint, atidlas::fetching_policy_type)
WRAP_BASE(mproduct)
WRAP_TEMPLATE(mproduct_nn, mproduct, uint, uint, uint, uint, uint, uint, uint, atidlas::fetching_policy_type, atidlas::fetching_policy_type, uint, uint)
WRAP_TEMPLATE(mproduct_tn, mproduct, uint, uint, uint, uint, uint, uint, uint, atidlas::fetching_policy_type, atidlas::fetching_policy_type, uint, uint)