DPNP C++ backend kernel library 0.20.0dev1
Data Parallel Extension for NumPy*
common.hpp
1//*****************************************************************************
2// Copyright (c) 2025, Intel Corporation
3// All rights reserved.
4//
5// Redistribution and use in source and binary forms, with or without
6// modification, are permitted provided that the following conditions are met:
7// - Redistributions of source code must retain the above copyright notice,
8// this list of conditions and the following disclaimer.
9// - Redistributions in binary form must reproduce the above copyright notice,
10// this list of conditions and the following disclaimer in the documentation
11// and/or other materials provided with the distribution.
12// - Neither the name of the copyright holder nor the names of its contributors
13// may be used to endorse or promote products derived from this software
14// without specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
20// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
26// THE POSSIBILITY OF SUCH DAMAGE.
27//*****************************************************************************
28
29#pragma once
30
31#include <algorithm>
32#include <cstddef>
33#include <cstdint>
34#include <utility>
35#include <vector>
36
37#include <sycl/sycl.hpp>
38
39// dpctl tensor headers
40#include "kernels/alignment.hpp"
41#include "kernels/elementwise_functions/common.hpp"
42#include "utils/sycl_utils.hpp"
43
44namespace dpnp::extensions::py_internal::elementwise_common
45{
46using dpctl::tensor::kernels::alignment_utils::
47 disabled_sg_loadstore_wrapper_krn;
48using dpctl::tensor::kernels::alignment_utils::is_aligned;
49using dpctl::tensor::kernels::alignment_utils::required_alignment;
50
51using dpctl::tensor::kernels::elementwise_common::select_lws;
52
53using dpctl::tensor::sycl_utils::sub_group_load;
54using dpctl::tensor::sycl_utils::sub_group_store;
55
63template <typename argT,
64 typename resT1,
65 typename resT2,
66 typename UnaryTwoOutputsOpT,
67 std::uint8_t vec_sz = 4u,
68 std::uint8_t n_vecs = 2u,
69 bool enable_sg_loadstore = true>
70struct UnaryTwoOutputsContigFunctor
71{
72private:
73 const argT *in = nullptr;
74 resT1 *out1 = nullptr;
75 resT2 *out2 = nullptr;
76 std::size_t nelems_;
77
78public:
79 UnaryTwoOutputsContigFunctor(const argT *inp,
80 resT1 *res1,
81 resT2 *res2,
82 const std::size_t n_elems)
83 : in(inp), out1(res1), out2(res2), nelems_(n_elems)
84 {
85 }
86
87 void operator()(sycl::nd_item<1> ndit) const
88 {
89 static constexpr std::uint8_t elems_per_wi = n_vecs * vec_sz;
90 UnaryTwoOutputsOpT op{};
91 /* Each work-item processes vec_sz elements, contiguous in memory */
92 /* NOTE: work-group size must be divisible by sub-group size */
93
94 if constexpr (enable_sg_loadstore &&
95 UnaryTwoOutputsOpT::is_constant::value) {
96 // output values of the operator are known constants
97 constexpr resT1 const_val1 = UnaryTwoOutputsOpT::constant_value1;
98 constexpr resT2 const_val2 = UnaryTwoOutputsOpT::constant_value2;
99
100 auto sg = ndit.get_sub_group();
101 const std::uint16_t sgSize = sg.get_max_local_range()[0];
102
103 const std::size_t base =
104 elems_per_wi * (ndit.get_group(0) * ndit.get_local_range(0) +
105 sg.get_group_id()[0] * sgSize);
106 if (base + elems_per_wi * sgSize < nelems_) {
107 static constexpr sycl::vec<resT1, vec_sz> res1_vec(const_val1);
108 static constexpr sycl::vec<resT2, vec_sz> res2_vec(const_val2);
109#pragma unroll
110 for (std::uint8_t it = 0; it < elems_per_wi; it += vec_sz) {
111 const std::size_t offset = base + it * sgSize;
112 auto out1_multi_ptr = sycl::address_space_cast<
113 sycl::access::address_space::global_space,
114 sycl::access::decorated::yes>(&out1[offset]);
115 auto out2_multi_ptr = sycl::address_space_cast<
116 sycl::access::address_space::global_space,
117 sycl::access::decorated::yes>(&out2[offset]);
118
119 sub_group_store<vec_sz>(sg, res1_vec, out1_multi_ptr);
120 sub_group_store<vec_sz>(sg, res2_vec, out2_multi_ptr);
121 }
122 }
123 else {
124 const std::size_t lane_id = sg.get_local_id()[0];
125 for (std::size_t k = base + lane_id; k < nelems_; k += sgSize) {
126 out1[k] = const_val1;
127 out2[k] = const_val2;
128 }
129 }
130 }
131 else if constexpr (enable_sg_loadstore &&
132 UnaryTwoOutputsOpT::supports_sg_loadstore::value &&
133 UnaryTwoOutputsOpT::supports_vec::value &&
134 (vec_sz > 1))
135 {
136 auto sg = ndit.get_sub_group();
137 const std::uint16_t sgSize = sg.get_max_local_range()[0];
138
139 const std::size_t base =
140 elems_per_wi * (ndit.get_group(0) * ndit.get_local_range(0) +
141 sg.get_group_id()[0] * sgSize);
142 if (base + elems_per_wi * sgSize < nelems_) {
143#pragma unroll
144 for (std::uint8_t it = 0; it < elems_per_wi; it += vec_sz) {
145 const std::size_t offset = base + it * sgSize;
146 auto in_multi_ptr = sycl::address_space_cast<
147 sycl::access::address_space::global_space,
148 sycl::access::decorated::yes>(&in[offset]);
149 auto out1_multi_ptr = sycl::address_space_cast<
150 sycl::access::address_space::global_space,
151 sycl::access::decorated::yes>(&out1[offset]);
152 auto out2_multi_ptr = sycl::address_space_cast<
153 sycl::access::address_space::global_space,
154 sycl::access::decorated::yes>(&out2[offset]);
155
156 const sycl::vec<argT, vec_sz> x =
157 sub_group_load<vec_sz>(sg, in_multi_ptr);
158 sycl::vec<resT2, vec_sz> res2_vec = {};
159 const sycl::vec<resT1, vec_sz> res1_vec = op(x, res2_vec);
160 sub_group_store<vec_sz>(sg, res1_vec, out1_multi_ptr);
161 sub_group_store<vec_sz>(sg, res2_vec, out2_multi_ptr);
162 }
163 }
164 else {
165 const std::size_t lane_id = sg.get_local_id()[0];
166 for (std::size_t k = base + lane_id; k < nelems_; k += sgSize) {
167 // scalar call
168 out1[k] = op(in[k], out2[k]);
169 }
170 }
171 }
172 else if constexpr (enable_sg_loadstore &&
173 UnaryTwoOutputsOpT::supports_sg_loadstore::value &&
174 std::is_same_v<resT1, argT>)
175 {
176 // default: use scalar-value function
177
178 auto sg = ndit.get_sub_group();
179 const std::uint16_t sgSize = sg.get_max_local_range()[0];
180 const std::size_t base =
181 elems_per_wi * (ndit.get_group(0) * ndit.get_local_range(0) +
182 sg.get_group_id()[0] * sgSize);
183
184 if (base + elems_per_wi * sgSize < nelems_) {
185#pragma unroll
186 for (std::uint8_t it = 0; it < elems_per_wi; it += vec_sz) {
187 const std::size_t offset = base + it * sgSize;
188 auto in_multi_ptr = sycl::address_space_cast<
189 sycl::access::address_space::global_space,
190 sycl::access::decorated::yes>(&in[offset]);
191 auto out1_multi_ptr = sycl::address_space_cast<
192 sycl::access::address_space::global_space,
193 sycl::access::decorated::yes>(&out1[offset]);
194 auto out2_multi_ptr = sycl::address_space_cast<
195 sycl::access::address_space::global_space,
196 sycl::access::decorated::yes>(&out2[offset]);
197
198 sycl::vec<argT, vec_sz> arg_vec =
199 sub_group_load<vec_sz>(sg, in_multi_ptr);
200 sycl::vec<resT2, vec_sz> res2_vec = {};
201#pragma unroll
202 for (std::uint32_t k = 0; k < vec_sz; ++k) {
203 arg_vec[k] = op(arg_vec[k], res2_vec[k]);
204 }
205 sub_group_store<vec_sz>(sg, arg_vec, out1_multi_ptr);
206 sub_group_store<vec_sz>(sg, res2_vec, out2_multi_ptr);
207 }
208 }
209 else {
210 const std::size_t lane_id = sg.get_local_id()[0];
211 for (std::size_t k = base + lane_id; k < nelems_; k += sgSize) {
212 out1[k] = op(in[k], out2[k]);
213 }
214 }
215 }
216 else if constexpr (enable_sg_loadstore &&
217 UnaryTwoOutputsOpT::supports_sg_loadstore::value)
218 {
219 // default: use scalar-value function
220
221 auto sg = ndit.get_sub_group();
222 const std::uint16_t sgSize = sg.get_max_local_range()[0];
223 const std::size_t base =
224 elems_per_wi * (ndit.get_group(0) * ndit.get_local_range(0) +
225 sg.get_group_id()[0] * sgSize);
226
227 if (base + elems_per_wi * sgSize < nelems_) {
228#pragma unroll
229 for (std::uint8_t it = 0; it < elems_per_wi; it += vec_sz) {
230 const std::size_t offset = base + it * sgSize;
231 auto in_multi_ptr = sycl::address_space_cast<
232 sycl::access::address_space::global_space,
233 sycl::access::decorated::yes>(&in[offset]);
234 auto out1_multi_ptr = sycl::address_space_cast<
235 sycl::access::address_space::global_space,
236 sycl::access::decorated::yes>(&out1[offset]);
237 auto out2_multi_ptr = sycl::address_space_cast<
238 sycl::access::address_space::global_space,
239 sycl::access::decorated::yes>(&out2[offset]);
240
241 const sycl::vec<argT, vec_sz> arg_vec =
242 sub_group_load<vec_sz>(sg, in_multi_ptr);
243 sycl::vec<resT1, vec_sz> res1_vec = {};
244 sycl::vec<resT2, vec_sz> res2_vec = {};
245#pragma unroll
246 for (std::uint8_t k = 0; k < vec_sz; ++k) {
247 res1_vec[k] = op(arg_vec[k], res2_vec[k]);
248 }
249 sub_group_store<vec_sz>(sg, res1_vec, out1_multi_ptr);
250 sub_group_store<vec_sz>(sg, res2_vec, out2_multi_ptr);
251 }
252 }
253 else {
254 const std::size_t lane_id = sg.get_local_id()[0];
255 for (std::size_t k = base + lane_id; k < nelems_; k += sgSize) {
256 out1[k] = op(in[k], out2[k]);
257 }
258 }
259 }
260 else {
261 const std::uint16_t sgSize =
262 ndit.get_sub_group().get_local_range()[0];
263 const std::size_t gid = ndit.get_global_linear_id();
264 const std::uint16_t elems_per_sg = sgSize * elems_per_wi;
265
266 const std::size_t start =
267 (gid / sgSize) * (elems_per_sg - sgSize) + gid;
268 const std::size_t end = std::min(nelems_, start + elems_per_sg);
269 for (std::size_t offset = start; offset < end; offset += sgSize) {
270 out1[offset] = op(in[offset], out2[offset]);
271 }
272 }
273 }
274};
275
283template <typename argT,
284 typename resT1,
285 typename resT2,
286 typename IndexerT,
287 typename UnaryTwoOutputsOpT>
288struct UnaryTwoOutputsStridedFunctor
289{
290private:
291 const argT *inp_ = nullptr;
292 resT1 *res1_ = nullptr;
293 resT2 *res2_ = nullptr;
294 IndexerT inp_out_indexer_;
295
296public:
297 UnaryTwoOutputsStridedFunctor(const argT *inp_p,
298 resT1 *res1_p,
299 resT2 *res2_p,
300 const IndexerT &inp_out_indexer)
301 : inp_(inp_p), res1_(res1_p), res2_(res2_p),
302 inp_out_indexer_(inp_out_indexer)
303 {
304 }
305
306 void operator()(sycl::id<1> wid) const
307 {
308 const auto &offsets_ = inp_out_indexer_(wid.get(0));
309 const ssize_t &inp_offset = offsets_.get_first_offset();
310 const ssize_t &res1_offset = offsets_.get_second_offset();
311 const ssize_t &res2_offset = offsets_.get_third_offset();
312
313 UnaryTwoOutputsOpT op{};
314
315 res1_[res1_offset] = op(inp_[inp_offset], res2_[res2_offset]);
316 }
317};
318
326template <typename argT1,
327 typename argT2,
328 typename resT1,
329 typename resT2,
330 typename BinaryOperatorT,
331 std::uint8_t vec_sz = 4u,
332 std::uint8_t n_vecs = 2u,
333 bool enable_sg_loadstore = true>
334struct BinaryTwoOutputsContigFunctor
335{
336private:
337 const argT1 *in1 = nullptr;
338 const argT2 *in2 = nullptr;
339 resT1 *out1 = nullptr;
340 resT2 *out2 = nullptr;
341 std::size_t nelems_;
342
343public:
344 BinaryTwoOutputsContigFunctor(const argT1 *inp1,
345 const argT2 *inp2,
346 resT1 *res1,
347 resT2 *res2,
348 std::size_t n_elems)
349 : in1(inp1), in2(inp2), out1(res1), out2(res2), nelems_(n_elems)
350 {
351 }
352
353 void operator()(sycl::nd_item<1> ndit) const
354 {
355 static constexpr std::uint8_t elems_per_wi = n_vecs * vec_sz;
356 BinaryOperatorT op{};
357 /* Each work-item processes vec_sz elements, contiguous in memory */
358 /* NOTE: work-group size must be divisible by sub-group size */
359
360 if constexpr (enable_sg_loadstore &&
361 BinaryOperatorT::supports_sg_loadstore::value &&
362 BinaryOperatorT::supports_vec::value && (vec_sz > 1))
363 {
364 auto sg = ndit.get_sub_group();
365 std::uint16_t sgSize = sg.get_max_local_range()[0];
366
367 const std::size_t base =
368 elems_per_wi * (ndit.get_group(0) * ndit.get_local_range(0) +
369 sg.get_group_id()[0] * sgSize);
370
371 if (base + elems_per_wi * sgSize < nelems_) {
372 sycl::vec<resT1, vec_sz> res1_vec;
373 sycl::vec<resT2, vec_sz> res2_vec;
374
375#pragma unroll
376 for (std::uint8_t it = 0; it < elems_per_wi; it += vec_sz) {
377 std::size_t offset = base + it * sgSize;
378 auto in1_multi_ptr = sycl::address_space_cast<
379 sycl::access::address_space::global_space,
380 sycl::access::decorated::yes>(&in1[offset]);
381 auto in2_multi_ptr = sycl::address_space_cast<
382 sycl::access::address_space::global_space,
383 sycl::access::decorated::yes>(&in2[offset]);
384 auto out1_multi_ptr = sycl::address_space_cast<
385 sycl::access::address_space::global_space,
386 sycl::access::decorated::yes>(&out1[offset]);
387 auto out2_multi_ptr = sycl::address_space_cast<
388 sycl::access::address_space::global_space,
389 sycl::access::decorated::yes>(&out2[offset]);
390
391 const sycl::vec<argT1, vec_sz> arg1_vec =
392 sub_group_load<vec_sz>(sg, in1_multi_ptr);
393 const sycl::vec<argT2, vec_sz> arg2_vec =
394 sub_group_load<vec_sz>(sg, in2_multi_ptr);
395 res1_vec = op(arg1_vec, arg2_vec, res2_vec);
396 sub_group_store<vec_sz>(sg, res1_vec, out1_multi_ptr);
397 sub_group_store<vec_sz>(sg, res2_vec, out2_multi_ptr);
398 }
399 }
400 else {
401 const std::size_t lane_id = sg.get_local_id()[0];
402 for (std::size_t k = base + lane_id; k < nelems_; k += sgSize) {
403 out1[k] = op(in1[k], in2[k], out2[k]);
404 }
405 }
406 }
407 else if constexpr (enable_sg_loadstore &&
408 BinaryOperatorT::supports_sg_loadstore::value)
409 {
410 auto sg = ndit.get_sub_group();
411 const std::uint16_t sgSize = sg.get_max_local_range()[0];
412
413 const std::size_t base =
414 elems_per_wi * (ndit.get_group(0) * ndit.get_local_range(0) +
415 sg.get_group_id()[0] * sgSize);
416
417 if (base + elems_per_wi * sgSize < nelems_) {
418#pragma unroll
419 for (std::uint8_t it = 0; it < elems_per_wi; it += vec_sz) {
420 const std::size_t offset = base + it * sgSize;
421 auto in1_multi_ptr = sycl::address_space_cast<
422 sycl::access::address_space::global_space,
423 sycl::access::decorated::yes>(&in1[offset]);
424 auto in2_multi_ptr = sycl::address_space_cast<
425 sycl::access::address_space::global_space,
426 sycl::access::decorated::yes>(&in2[offset]);
427 auto out1_multi_ptr = sycl::address_space_cast<
428 sycl::access::address_space::global_space,
429 sycl::access::decorated::yes>(&out1[offset]);
430 auto out2_multi_ptr = sycl::address_space_cast<
431 sycl::access::address_space::global_space,
432 sycl::access::decorated::yes>(&out2[offset]);
433
434 const sycl::vec<argT1, vec_sz> arg1_vec =
435 sub_group_load<vec_sz>(sg, in1_multi_ptr);
436 const sycl::vec<argT2, vec_sz> arg2_vec =
437 sub_group_load<vec_sz>(sg, in2_multi_ptr);
438
439 sycl::vec<resT1, vec_sz> res1_vec;
440 sycl::vec<resT2, vec_sz> res2_vec;
441#pragma unroll
442 for (std::uint8_t vec_id = 0; vec_id < vec_sz; ++vec_id) {
443 res1_vec[vec_id] =
444 op(arg1_vec[vec_id], arg2_vec[vec_id],
445 res2_vec[vec_id]);
446 }
447 sub_group_store<vec_sz>(sg, res1_vec, out1_multi_ptr);
448 sub_group_store<vec_sz>(sg, res2_vec, out2_multi_ptr);
449 }
450 }
451 else {
452 const std::size_t lane_id = sg.get_local_id()[0];
453 for (std::size_t k = base + lane_id; k < nelems_; k += sgSize) {
454 out1[k] = op(in1[k], in2[k], out2[k]);
455 }
456 }
457 }
458 else {
459 const std::size_t sgSize =
460 ndit.get_sub_group().get_local_range()[0];
461 const std::size_t gid = ndit.get_global_linear_id();
462 const std::size_t elems_per_sg = sgSize * elems_per_wi;
463
464 const std::size_t start =
465 (gid / sgSize) * (elems_per_sg - sgSize) + gid;
466 const std::size_t end = std::min(nelems_, start + elems_per_sg);
467 for (std::size_t offset = start; offset < end; offset += sgSize) {
468 out1[offset] = op(in1[offset], in2[offset], out2[offset]);
469 }
470 }
471 }
472};
473
481template <typename argT1,
482 typename argT2,
483 typename resT1,
484 typename resT2,
485 typename FourOffsets_IndexerT,
486 typename BinaryOperatorT>
487struct BinaryTwoOutputsStridedFunctor
488{
489private:
490 const argT1 *in1 = nullptr;
491 const argT2 *in2 = nullptr;
492 resT1 *out1 = nullptr;
493 resT2 *out2 = nullptr;
494 FourOffsets_IndexerT four_offsets_indexer_;
495
496public:
497 BinaryTwoOutputsStridedFunctor(const argT1 *inp1_tp,
498 const argT2 *inp2_tp,
499 resT1 *res1_tp,
500 resT2 *res2_tp,
501 const FourOffsets_IndexerT &inps_res_indexer)
502 : in1(inp1_tp), in2(inp2_tp), out1(res1_tp), out2(res2_tp),
503 four_offsets_indexer_(inps_res_indexer)
504 {
505 }
506
507 void operator()(sycl::id<1> wid) const
508 {
509 const auto &four_offsets_ =
510 four_offsets_indexer_(static_cast<ssize_t>(wid.get(0)));
511
512 const auto &inp1_offset = four_offsets_.get_first_offset();
513 const auto &inp2_offset = four_offsets_.get_second_offset();
514 const auto &out1_offset = four_offsets_.get_third_offset();
515 const auto &out2_offset = four_offsets_.get_fourth_offset();
516
517 BinaryOperatorT op{};
518 out1[out1_offset] =
519 op(in1[inp1_offset], in2[inp2_offset], out2[out2_offset]);
520 }
521};
522
530template <typename argTy,
531 template <typename T>
532 class UnaryTwoOutputsType,
533 template <typename A,
534 typename R1,
535 typename R2,
536 std::uint8_t vs,
537 std::uint8_t nv,
538 bool enable>
539 class UnaryTwoOutputsContigFunctorT,
540 template <typename A,
541 typename R1,
542 typename R2,
543 std::uint8_t vs,
544 std::uint8_t nv>
545 class kernel_name,
546 std::uint8_t vec_sz = 4u,
547 std::uint8_t n_vecs = 2u>
548sycl::event
549 unary_two_outputs_contig_impl(sycl::queue &exec_q,
550 std::size_t nelems,
551 const char *arg_p,
552 char *res1_p,
553 char *res2_p,
554 const std::vector<sycl::event> &depends = {})
555{
556 static constexpr std::uint8_t elems_per_wi = n_vecs * vec_sz;
557 const std::size_t n_work_items_needed = nelems / elems_per_wi;
558 const std::size_t lws =
559 select_lws(exec_q.get_device(), n_work_items_needed);
560
561 const std::size_t n_groups =
562 ((nelems + lws * elems_per_wi - 1) / (lws * elems_per_wi));
563 const auto gws_range = sycl::range<1>(n_groups * lws);
564 const auto lws_range = sycl::range<1>(lws);
565
566 using resTy1 = typename UnaryTwoOutputsType<argTy>::value_type1;
567 using resTy2 = typename UnaryTwoOutputsType<argTy>::value_type2;
568 using BaseKernelName = kernel_name<argTy, resTy1, resTy2, vec_sz, n_vecs>;
569
570 const argTy *arg_tp = reinterpret_cast<const argTy *>(arg_p);
571 resTy1 *res1_tp = reinterpret_cast<resTy1 *>(res1_p);
572 resTy2 *res2_tp = reinterpret_cast<resTy2 *>(res2_p);
573
574 sycl::event comp_ev = exec_q.submit([&](sycl::handler &cgh) {
575 cgh.depends_on(depends);
576
577 if (is_aligned<required_alignment>(arg_p) &&
578 is_aligned<required_alignment>(res1_p) &&
579 is_aligned<required_alignment>(res2_p))
580 {
581 static constexpr bool enable_sg_loadstore = true;
582 using KernelName = BaseKernelName;
583 using Impl =
584 UnaryTwoOutputsContigFunctorT<argTy, resTy1, resTy2, vec_sz,
585 n_vecs, enable_sg_loadstore>;
586
587 cgh.parallel_for<KernelName>(
588 sycl::nd_range<1>(gws_range, lws_range),
589 Impl(arg_tp, res1_tp, res2_tp, nelems));
590 }
591 else {
592 static constexpr bool disable_sg_loadstore = false;
593 using KernelName =
594 disabled_sg_loadstore_wrapper_krn<BaseKernelName>;
595 using Impl =
596 UnaryTwoOutputsContigFunctorT<argTy, resTy1, resTy2, vec_sz,
597 n_vecs, disable_sg_loadstore>;
598
599 cgh.parallel_for<KernelName>(
600 sycl::nd_range<1>(gws_range, lws_range),
601 Impl(arg_tp, res1_tp, res2_tp, nelems));
602 }
603 });
604
605 return comp_ev;
606}
607
615template <typename argTy,
616 template <typename T>
617 class UnaryTwoOutputsType,
618 template <typename A, typename R1, typename R2, typename I>
619 class UnaryTwoOutputsStridedFunctorT,
620 template <typename A, typename R1, typename R2, typename I>
621 class kernel_name>
622sycl::event unary_two_outputs_strided_impl(
623 sycl::queue &exec_q,
624 std::size_t nelems,
625 int nd,
626 const ssize_t *shape_and_strides,
627 const char *arg_p,
628 ssize_t arg_offset,
629 char *res1_p,
630 ssize_t res1_offset,
631 char *res2_p,
632 ssize_t res2_offset,
633 const std::vector<sycl::event> &depends,
634 const std::vector<sycl::event> &additional_depends)
635{
636 sycl::event comp_ev = exec_q.submit([&](sycl::handler &cgh) {
637 cgh.depends_on(depends);
638 cgh.depends_on(additional_depends);
639
640 using res1Ty = typename UnaryTwoOutputsType<argTy>::value_type1;
641 using res2Ty = typename UnaryTwoOutputsType<argTy>::value_type2;
642 using IndexerT =
643 typename dpctl::tensor::offset_utils::ThreeOffsets_StridedIndexer;
644
645 const IndexerT indexer{nd, arg_offset, res1_offset, res2_offset,
646 shape_and_strides};
647
648 const argTy *arg_tp = reinterpret_cast<const argTy *>(arg_p);
649 res1Ty *res1_tp = reinterpret_cast<res1Ty *>(res1_p);
650 res2Ty *res2_tp = reinterpret_cast<res2Ty *>(res2_p);
651
652 using Impl =
653 UnaryTwoOutputsStridedFunctorT<argTy, res1Ty, res2Ty, IndexerT>;
654
655 cgh.parallel_for<kernel_name<argTy, res1Ty, res2Ty, IndexerT>>(
656 {nelems}, Impl(arg_tp, res1_tp, res2_tp, indexer));
657 });
658 return comp_ev;
659}
660
668template <typename argTy1,
669 typename argTy2,
670 template <typename T1, typename T2>
671 class BinaryTwoOutputsType,
672 template <typename T1,
673 typename T2,
674 typename T3,
675 typename T4,
676 std::uint8_t vs,
677 std::uint8_t nv,
678 bool enable_sg_loadstore>
679 class BinaryTwoOutputsContigFunctorT,
680 template <typename T1,
681 typename T2,
682 typename T3,
683 typename T4,
684 std::uint8_t vs,
685 std::uint8_t nv>
686 class kernel_name,
687 std::uint8_t vec_sz = 4u,
688 std::uint8_t n_vecs = 2u>
689sycl::event
690 binary_two_outputs_contig_impl(sycl::queue &exec_q,
691 std::size_t nelems,
692 const char *arg1_p,
693 ssize_t arg1_offset,
694 const char *arg2_p,
695 ssize_t arg2_offset,
696 char *res1_p,
697 ssize_t res1_offset,
698 char *res2_p,
699 ssize_t res2_offset,
700 const std::vector<sycl::event> &depends = {})
701{
702 const std::size_t n_work_items_needed = nelems / (n_vecs * vec_sz);
703 const std::size_t lws =
704 select_lws(exec_q.get_device(), n_work_items_needed);
705
706 const std::size_t n_groups =
707 ((nelems + lws * n_vecs * vec_sz - 1) / (lws * n_vecs * vec_sz));
708 const auto gws_range = sycl::range<1>(n_groups * lws);
709 const auto lws_range = sycl::range<1>(lws);
710
711 using resTy1 = typename BinaryTwoOutputsType<argTy1, argTy2>::value_type1;
712 using resTy2 = typename BinaryTwoOutputsType<argTy1, argTy2>::value_type2;
713 using BaseKernelName =
714 kernel_name<argTy1, argTy2, resTy1, resTy2, vec_sz, n_vecs>;
715
716 const argTy1 *arg1_tp =
717 reinterpret_cast<const argTy1 *>(arg1_p) + arg1_offset;
718 const argTy2 *arg2_tp =
719 reinterpret_cast<const argTy2 *>(arg2_p) + arg2_offset;
720 resTy1 *res1_tp = reinterpret_cast<resTy1 *>(res1_p) + res1_offset;
721 resTy2 *res2_tp = reinterpret_cast<resTy2 *>(res2_p) + res2_offset;
722
723 sycl::event comp_ev = exec_q.submit([&](sycl::handler &cgh) {
724 cgh.depends_on(depends);
725
726 if (is_aligned<required_alignment>(arg1_tp) &&
727 is_aligned<required_alignment>(arg2_tp) &&
728 is_aligned<required_alignment>(res1_tp) &&
729 is_aligned<required_alignment>(res2_tp))
730 {
731 static constexpr bool enable_sg_loadstore = true;
732 using KernelName = BaseKernelName;
733 using Impl = BinaryTwoOutputsContigFunctorT<argTy1, argTy2, resTy1,
734 resTy2, vec_sz, n_vecs,
735 enable_sg_loadstore>;
736
737 cgh.parallel_for<KernelName>(
738 sycl::nd_range<1>(gws_range, lws_range),
739 Impl(arg1_tp, arg2_tp, res1_tp, res2_tp, nelems));
740 }
741 else {
742 static constexpr bool disable_sg_loadstore = false;
743 using KernelName =
744 disabled_sg_loadstore_wrapper_krn<BaseKernelName>;
745 using Impl = BinaryTwoOutputsContigFunctorT<argTy1, argTy2, resTy1,
746 resTy2, vec_sz, n_vecs,
747 disable_sg_loadstore>;
748
749 cgh.parallel_for<KernelName>(
750 sycl::nd_range<1>(gws_range, lws_range),
751 Impl(arg1_tp, arg2_tp, res1_tp, res2_tp, nelems));
752 }
753 });
754 return comp_ev;
755}
756
764template <
765 typename argTy1,
766 typename argTy2,
767 template <typename T1, typename T2>
768 class BinaryTwoOutputsType,
769 template <typename T1, typename T2, typename T3, typename T4, typename IndT>
770 class BinaryTwoOutputsStridedFunctorT,
771 template <typename T1, typename T2, typename T3, typename T4, typename IndT>
772 class kernel_name>
773sycl::event binary_two_outputs_strided_impl(
774 sycl::queue &exec_q,
775 std::size_t nelems,
776 int nd,
777 const ssize_t *shape_and_strides,
778 const char *arg1_p,
779 ssize_t arg1_offset,
780 const char *arg2_p,
781 ssize_t arg2_offset,
782 char *res1_p,
783 ssize_t res1_offset,
784 char *res2_p,
785 ssize_t res2_offset,
786 const std::vector<sycl::event> &depends,
787 const std::vector<sycl::event> &additional_depends)
788{
789 sycl::event comp_ev = exec_q.submit([&](sycl::handler &cgh) {
790 cgh.depends_on(depends);
791 cgh.depends_on(additional_depends);
792
793 using resTy1 =
794 typename BinaryTwoOutputsType<argTy1, argTy2>::value_type1;
795 using resTy2 =
796 typename BinaryTwoOutputsType<argTy1, argTy2>::value_type2;
797
798 using IndexerT =
799 typename dpctl::tensor::offset_utils::FourOffsets_StridedIndexer;
800
801 const IndexerT indexer{nd, arg1_offset, arg2_offset,
802 res1_offset, res2_offset, shape_and_strides};
803
804 const argTy1 *arg1_tp = reinterpret_cast<const argTy1 *>(arg1_p);
805 const argTy2 *arg2_tp = reinterpret_cast<const argTy2 *>(arg2_p);
806 resTy1 *res1_tp = reinterpret_cast<resTy1 *>(res1_p);
807 resTy2 *res2_tp = reinterpret_cast<resTy2 *>(res2_p);
808
809 using Impl = BinaryTwoOutputsStridedFunctorT<argTy1, argTy2, resTy1,
810 resTy2, IndexerT>;
811
812 cgh.parallel_for<kernel_name<argTy1, argTy2, resTy1, resTy2, IndexerT>>(
813 {nelems}, Impl(arg1_tp, arg2_tp, res1_tp, res2_tp, indexer));
814 });
815 return comp_ev;
816}
817
818// Typedefs for function pointers
819
820typedef sycl::event (*unary_two_outputs_contig_impl_fn_ptr_t)(
821 sycl::queue &,
822 std::size_t,
823 const char *,
824 char *,
825 char *,
826 const std::vector<sycl::event> &);
827
828typedef sycl::event (*unary_two_outputs_strided_impl_fn_ptr_t)(
829 sycl::queue &,
830 std::size_t,
831 int,
832 const ssize_t *,
833 const char *,
834 ssize_t,
835 char *,
836 ssize_t,
837 char *,
838 ssize_t,
839 const std::vector<sycl::event> &,
840 const std::vector<sycl::event> &);
841
842typedef sycl::event (*binary_two_outputs_contig_impl_fn_ptr_t)(
843 sycl::queue &,
844 std::size_t,
845 const char *,
846 ssize_t,
847 const char *,
848 ssize_t,
849 char *,
850 ssize_t,
851 char *,
852 ssize_t,
853 const std::vector<sycl::event> &);
854
855typedef sycl::event (*binary_two_outputs_strided_impl_fn_ptr_t)(
856 sycl::queue &,
857 std::size_t,
858 int,
859 const ssize_t *,
860 const char *,
861 ssize_t,
862 const char *,
863 ssize_t,
864 char *,
865 ssize_t,
866 char *,
867 ssize_t,
868 const std::vector<sycl::event> &,
869 const std::vector<sycl::event> &);
870
871} // namespace dpnp::extensions::py_internal::elementwise_common
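The contiguous implementations above derive their ND-range from nelems, the device-dependent local size returned by select_lws, and elems_per_wi = n_vecs * vec_sz. The following worked example of that arithmetic is illustrative only; the local size of 128 is a stand-in for whatever select_lws would actually return for a given device.

#include <cstddef>
#include <cstdint>

int main()
{
    // Defaults used by the contiguous impls in this header.
    constexpr std::uint8_t vec_sz = 4u;
    constexpr std::uint8_t n_vecs = 2u;
    constexpr std::uint8_t elems_per_wi = n_vecs * vec_sz; // 8 elements per work-item

    const std::size_t nelems = 1'000'000;
    const std::size_t lws = 128; // stand-in for select_lws(device, nelems / elems_per_wi)

    // Ceiling division: each work-group covers lws * elems_per_wi elements.
    const std::size_t n_groups =
        (nelems + lws * elems_per_wi - 1) / (lws * elems_per_wi); // 977 groups
    const std::size_t gws = n_groups * lws;                       // 125'056 work-items
    (void)gws;
}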
dpnp::extensions::py_internal::elementwise_common::BinaryTwoOutputsContigFunctor
Functor for evaluation of a binary function with two output arrays on contiguous arrays.
Definition common.hpp:335
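The BinaryOperatorT supplied to this functor is expected to expose supports_sg_loadstore and supports_vec traits, a scalar call of the form resT1 operator()(const argT1 &, const argT2 &, resT2 &), and, when supports_vec is true, an overload taking sycl::vec arguments. A minimal sketch of such an operation type follows; the hypothetical integer DivModOp is not part of this header and is shown only to illustrate the expected shape.

#include <sycl/sycl.hpp>
#include <type_traits>

// Hypothetical operation: returns the quotient and writes the remainder
// through the third argument. Assumes T is an integral type and divisors
// are non-zero.
template <typename T>
struct DivModOp
{
    using supports_sg_loadstore = std::true_type; // sub-group load/store is safe
    using supports_vec = std::true_type;          // vectorized overload provided

    // Scalar form, used by the tail loop and by the strided functor.
    T operator()(const T &a, const T &b, T &rem) const
    {
        rem = a % b;
        return a / b;
    }

    // Vectorized form; sycl::vec provides element-wise / and % for integer types.
    template <int vec_sz>
    sycl::vec<T, vec_sz> operator()(const sycl::vec<T, vec_sz> &a,
                                    const sycl::vec<T, vec_sz> &b,
                                    sycl::vec<T, vec_sz> &rem) const
    {
        rem = a % b;
        return a / b;
    }
};

With such an operation, an instantiation like BinaryTwoOutputsContigFunctor<int, int, int, int, DivModOp<int>> takes the vectorized sub-group path for full, aligned tiles and falls back to the scalar call for the tail.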
dpnp::extensions::py_internal::elementwise_common::BinaryTwoOutputsStridedFunctor
Functor for evaluation of a binary function with two output arrays on strided data.
Definition common.hpp:488
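Here FourOffsets_IndexerT only needs to be callable with a linear element index and to return an object exposing get_first_offset() through get_fourth_offset(); binary_two_outputs_strided_impl above passes dpctl's FourOffsets_StridedIndexer for this purpose. A minimal stand-in that satisfies the same contract, with hypothetical names and trivial unit-stride indexing, purely for illustration:

#include <cstddef>

// Offsets bundle mirroring the accessor methods the functor calls.
struct FourOffsets
{
    std::ptrdiff_t o1, o2, o3, o4;
    std::ptrdiff_t get_first_offset() const { return o1; }
    std::ptrdiff_t get_second_offset() const { return o2; }
    std::ptrdiff_t get_third_offset() const { return o3; }
    std::ptrdiff_t get_fourth_offset() const { return o4; }
};

// Trivial indexer for four unit-strided arrays: every array uses the same
// linear offset. A real strided indexer applies shape/stride arithmetic.
struct UnitStrideFourOffsetsIndexer
{
    FourOffsets operator()(std::ptrdiff_t i) const { return {i, i, i, i}; }
};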
dpnp::extensions::py_internal::elementwise_common::UnaryTwoOutputsContigFunctor
Functor for evaluation of a unary function with two output arrays on contiguous arrays.
Definition common.hpp:71
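The UnaryTwoOutputsOpT consumed by this functor is expected to provide is_constant, supports_sg_loadstore and supports_vec traits, a scalar call of the form resT1 operator()(const argT &, resT2 &), and, when supports_vec is true, a sycl::vec overload; operations with is_constant = std::true_type must additionally expose static constant_value1/constant_value2 members. Below is a minimal sketch under the assumption that argT, resT1 and resT2 coincide; the ModfOp name and its split into fractional and integral parts are hypothetical and not part of this header.

#include <sycl/sycl.hpp>
#include <type_traits>

template <typename T>
struct ModfOp
{
    using is_constant = std::false_type;          // no constant_value1/2 members needed
    using supports_sg_loadstore = std::true_type; // sub-group loads/stores are safe
    using supports_vec = std::true_type;          // vectorized overload provided

    // Scalar call: the first result is returned, the second is written via reference.
    T operator()(const T &x, T &integral_part) const
    {
        integral_part = sycl::trunc(x);
        return x - integral_part;
    }

    // Vectorized call, used when supports_vec::value and vec_sz > 1.
    template <int vec_sz>
    sycl::vec<T, vec_sz> operator()(const sycl::vec<T, vec_sz> &x,
                                    sycl::vec<T, vec_sz> &integral_part) const
    {
        integral_part = sycl::trunc(x);
        return x - integral_part;
    }
};

Because the traits are inspected with if constexpr on a dependent type, members a given branch does not use (such as constant_value1/2 here) never need to exist.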
dpnp::extensions::py_internal::elementwise_common::UnaryTwoOutputsStridedFunctor
Functor for evaluation of a unary function with two output arrays on strided data.
Definition common.hpp:289
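Finally, a hedged sketch of how these building blocks are meant to be combined: a type map supplying value_type1/value_type2, a functor alias that bakes the operation into UnaryTwoOutputsContigFunctor, and a kernel-name template are passed to unary_two_outputs_contig_impl, whose instantiation can then be stored through the function-pointer typedefs above. Everything named here other than the entities defined in common.hpp (SinCosOp, SinCosOutputType, SinCosContigFunctor, sincos_contig_krn, sincos_contig_fn) is a hypothetical placeholder.

#include <cstdint>
#include <sycl/sycl.hpp>
#include <type_traits>

#include "common.hpp" // this header; the include path may differ in the dpnp tree

namespace ec = dpnp::extensions::py_internal::elementwise_common;

// Hypothetical scalar-only operation: first output gets sin(x), second cos(x).
template <typename T>
struct SinCosOp
{
    using is_constant = std::false_type;
    using supports_sg_loadstore = std::true_type;
    using supports_vec = std::false_type; // scalar path only in this sketch

    T operator()(const T &x, T &res2) const
    {
        res2 = sycl::cos(x);
        return sycl::sin(x);
    }
};

// Type map consumed by unary_two_outputs_contig_impl to pick the result types.
template <typename T>
struct SinCosOutputType
{
    using value_type1 = T;
    using value_type2 = T;
};

// Partially-applied functor alias matching the template-template parameter
// expected by unary_two_outputs_contig_impl (the operation type is baked in).
template <typename A, typename R1, typename R2, std::uint8_t vs, std::uint8_t nv, bool enable>
using SinCosContigFunctor =
    ec::UnaryTwoOutputsContigFunctor<A, R1, R2, SinCosOp<A>, vs, nv, enable>;

// SYCL kernel-name template; a declaration is sufficient.
template <typename A, typename R1, typename R2, std::uint8_t vs, std::uint8_t nv>
class sincos_contig_krn;

// One slot of a dtype-indexed table of contiguous implementations.
static ec::unary_two_outputs_contig_impl_fn_ptr_t sincos_contig_fn =
    ec::unary_two_outputs_contig_impl<float, SinCosOutputType, SinCosContigFunctor,
                                      sincos_contig_krn>;

In dpnp extensions, pointers of this kind are typically collected into per-dtype dispatch tables populated when the extension is initialized, so the Python layer can select the right implementation from the array's type number.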