#include <algorithm>
#include <complex>
#include <cstddef>
#include <cstdint>
#include <type_traits>
#include <vector>

#include <sycl/sycl.hpp>

#include "kernels/alignment.hpp"
#include "kernels/dpctl_tensor_types.hpp"
#include "kernels/elementwise_functions/sycl_complex.hpp"
#include "utils/offset_utils.hpp"
#include "utils/sycl_utils.hpp"
#include "utils/type_utils.hpp"
namespace dpnp::kernels::isclose
{
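// Element-wise closeness check for real floating-point (or sycl::half) values,
// following the numpy.isclose convention: |a - b| <= atol + rtol * |b| for
// finite inputs; NaNs compare close only when equal_nan is set, infinities
// only when they compare equal.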
template <typename T, typename scT>
inline bool isclose(const T a,
                    const T b,
                    const scT rtol,
                    const scT atol,
                    const bool equal_nan)
{
    static_assert(std::is_floating_point_v<T> || std::is_same_v<T, sycl::half>);

    if (sycl::isfinite(a) && sycl::isfinite(b)) {
        return sycl::fabs(a - b) <= atol + rtol * sycl::fabs(b);
    }
    if (sycl::isnan(a) && sycl::isnan(b)) {
        return equal_nan;
    }
    return a == b;
}
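// Complex overload: the tolerance test is applied to the magnitude of the
// complex difference; the equal_nan path requires both the real and imaginary
// parts of both operands to be NaN.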
template <typename T, typename scT>
inline bool isclose(const std::complex<T> a,
                    const std::complex<T> b,
                    const scT rtol,
                    const scT atol,
                    const bool equal_nan)
{
    const bool a_finite = sycl::isfinite(a.real()) && sycl::isfinite(a.imag());
    const bool b_finite = sycl::isfinite(b.real()) && sycl::isfinite(b.imag());

    if (a_finite && b_finite) {
        return exprm_ns::abs(exprm_ns::complex<T>(a - b)) <=
               atol + rtol * exprm_ns::abs(exprm_ns::complex<T>(b));
    }
    if (sycl::isnan(a.real()) && sycl::isnan(a.imag()) &&
        sycl::isnan(b.real()) && sycl::isnan(b.imag()))
    {
        return equal_nan;
    }
    return a == b;
}
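// Work-item functor for strided (non-contiguous) data: a ThreeOffsets indexer
// translates the flat work-item id into the offsets of both inputs and the
// result.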
template <typename T,
          typename scT,
          typename resTy,
          typename ThreeOffsets_IndexerT>
struct IsCloseStridedScalarFunctor
{
private:
    const T *a_ = nullptr;
    const T *b_ = nullptr;
    resTy *out_ = nullptr;
    const ThreeOffsets_IndexerT three_offsets_indexer_;
    const scT rtol_;
    const scT atol_;
    const bool equal_nan_;

public:
    IsCloseStridedScalarFunctor(const T *a,
                                const T *b,
                                resTy *out,
                                const ThreeOffsets_IndexerT &inps_res_indexer,
                                const scT rtol,
                                const scT atol,
                                const bool equal_nan)
        : a_(a), b_(b), out_(out), three_offsets_indexer_(inps_res_indexer),
          rtol_(rtol), atol_(atol), equal_nan_(equal_nan)
    {
    }

    void operator()(sycl::id<1> wid) const
    {
        const auto &three_offsets_ = three_offsets_indexer_(wid.get(0));

        const dpctl::tensor::ssize_t &inp1_offset =
            three_offsets_.get_first_offset();
        const dpctl::tensor::ssize_t &inp2_offset =
            three_offsets_.get_second_offset();
        const dpctl::tensor::ssize_t &out_offset =
            three_offsets_.get_third_offset();

        out_[out_offset] =
            isclose(a_[inp1_offset], b_[inp2_offset], rtol_, atol_, equal_nan_);
    }
};
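// Work-item functor for contiguous data: each work-item handles
// n_vecs * vec_sz elements and uses sub-group block loads/stores when
// enable_sg_loadstore is set and the element type is not complex.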
template <typename T,
          typename scT,
          typename resTy,
          std::uint8_t vec_sz = 4u,
          std::uint8_t n_vecs = 2u,
          bool enable_sg_loadstore = true>
struct IsCloseContigScalarFunctor
{
private:
    const T *a_ = nullptr;
    const T *b_ = nullptr;
    resTy *out_ = nullptr;
    std::size_t nelems_ = 0;
    const scT rtol_;
    const scT atol_;
    const bool equal_nan_;

public:
    IsCloseContigScalarFunctor(const T *a,
                               const T *b,
                               resTy *out,
                               const std::size_t n_elems,
                               const scT rtol,
                               const scT atol,
                               const bool equal_nan)
        : a_(a), b_(b), out_(out), nelems_(n_elems), rtol_(rtol), atol_(atol),
          equal_nan_(equal_nan)
    {
    }

    void operator()(sycl::nd_item<1> ndit) const
    {
        constexpr std::uint8_t elems_per_wi = n_vecs * vec_sz;
        using dpctl::tensor::type_utils::is_complex_v;
        if constexpr (enable_sg_loadstore && !is_complex_v<T>) {
            auto sg = ndit.get_sub_group();
            const std::uint16_t sgSize = sg.get_max_local_range()[0];
            const std::size_t base =
                elems_per_wi * (ndit.get_group(0) * ndit.get_local_range(0) +
                                sg.get_group_id()[0] * sgSize);

            if (base + elems_per_wi * sgSize < nelems_) {
                using dpctl::tensor::sycl_utils::sub_group_load;
                using dpctl::tensor::sycl_utils::sub_group_store;

                for (std::uint8_t it = 0; it < elems_per_wi; it += vec_sz) {
                    const std::size_t offset = base + it * sgSize;
                    auto a_multi_ptr = sycl::address_space_cast<
                        sycl::access::address_space::global_space,
                        sycl::access::decorated::yes>(&a_[offset]);
                    auto b_multi_ptr = sycl::address_space_cast<
                        sycl::access::address_space::global_space,
                        sycl::access::decorated::yes>(&b_[offset]);
                    auto out_multi_ptr = sycl::address_space_cast<
                        sycl::access::address_space::global_space,
                        sycl::access::decorated::yes>(&out_[offset]);

                    const sycl::vec<T, vec_sz> a_vec =
                        sub_group_load<vec_sz>(sg, a_multi_ptr);
                    const sycl::vec<T, vec_sz> b_vec =
                        sub_group_load<vec_sz>(sg, b_multi_ptr);

                    sycl::vec<resTy, vec_sz> res_vec;
                    for (std::uint8_t vec_id = 0; vec_id < vec_sz; ++vec_id) {
                        res_vec[vec_id] = isclose(a_vec[vec_id], b_vec[vec_id],
                                                  rtol_, atol_, equal_nan_);
                    }
                    sub_group_store<vec_sz>(sg, res_vec, out_multi_ptr);
                }
            }
            else {
                const std::size_t lane_id = sg.get_local_id()[0];
                for (std::size_t k = base + lane_id; k < nelems_; k += sgSize) {
                    out_[k] = isclose(a_[k], b_[k], rtol_, atol_, equal_nan_);
                }
            }
        }
        else {
            const std::uint16_t sgSize =
                ndit.get_sub_group().get_local_range()[0];
            const std::size_t gid = ndit.get_global_linear_id();
            const std::uint16_t elems_per_sg = sgSize * elems_per_wi;

            const std::size_t start =
                (gid / sgSize) * (elems_per_sg - sgSize) + gid;
            const std::size_t end = std::min(nelems_, start + elems_per_sg);
            for (std::size_t offset = start; offset < end; offset += sgSize) {
                out_[offset] =
                    isclose(a_[offset], b_[offset], rtol_, atol_, equal_nan_);
            }
        }
    }
};
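// Launches the strided kernel: one work-item per element, with per-element
// offsets resolved by ThreeOffsets_StridedIndexer.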
template <typename T, typename scT>
sycl::event
    isclose_strided_scalar_impl(sycl::queue &exec_q,
                                const std::size_t nelems,
                                const int nd,
                                const dpctl::tensor::ssize_t *shape_strides,
                                const scT rtol,
                                const scT atol,
                                const bool equal_nan,
                                const char *a_cp,
                                const dpctl::tensor::ssize_t a_offset,
                                const char *b_cp,
                                const dpctl::tensor::ssize_t b_offset,
                                char *out_cp,
                                const dpctl::tensor::ssize_t out_offset,
                                const std::vector<sycl::event> &depends)
{
    dpctl::tensor::type_utils::validate_type_for_device<T>(exec_q);

    const T *a_tp = reinterpret_cast<const T *>(a_cp);
    const T *b_tp = reinterpret_cast<const T *>(b_cp);

    using resTy = bool;
    resTy *out_tp = reinterpret_cast<resTy *>(out_cp);

    using IndexerT =
        typename dpctl::tensor::offset_utils::ThreeOffsets_StridedIndexer;
    const IndexerT indexer{nd, a_offset, b_offset, out_offset, shape_strides};

    sycl::event comp_ev = exec_q.submit([&](sycl::handler &cgh) {
        cgh.depends_on(depends);

        using IsCloseFunc =
            IsCloseStridedScalarFunctor<T, scT, resTy, IndexerT>;
        cgh.parallel_for<IsCloseFunc>(
            sycl::range<1>(nelems),
            IsCloseFunc(a_tp, b_tp, out_tp, indexer, rtol, atol, equal_nan));
    });

    return comp_ev;
}
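// Launches the contiguous kernel over an nd_range sized so that each
// work-item covers n_vecs * vec_sz elements; the sub-group load/store path is
// enabled only when all three pointers satisfy the required alignment.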
template <typename T,
          typename scT,
          std::uint8_t vec_sz = 4u,
          std::uint8_t n_vecs = 2u>
sycl::event
    isclose_contig_scalar_impl(sycl::queue &exec_q,
                               const std::size_t nelems,
                               const scT rtol,
                               const scT atol,
                               const bool equal_nan,
                               const char *a_cp,
                               const char *b_cp,
                               char *out_cp,
                               const std::vector<sycl::event> &depends = {})
{
    constexpr std::uint8_t elems_per_wi = n_vecs * vec_sz;
    const std::size_t n_work_items_needed = nelems / elems_per_wi;
    const std::size_t empirical_threshold = std::size_t(1) << 21;
    const std::size_t lws = (n_work_items_needed <= empirical_threshold)
                                ? std::size_t(128)
                                : std::size_t(256);

    const std::size_t n_groups =
        ((nelems + lws * elems_per_wi - 1) / (lws * elems_per_wi));
    const auto gws_range = sycl::range<1>(n_groups * lws);
    const auto lws_range = sycl::range<1>(lws);

    const T *a_tp = reinterpret_cast<const T *>(a_cp);
    const T *b_tp = reinterpret_cast<const T *>(b_cp);

    using resTy = bool;
    resTy *out_tp = reinterpret_cast<resTy *>(out_cp);

    sycl::event comp_ev = exec_q.submit([&](sycl::handler &cgh) {
        cgh.depends_on(depends);

        using dpctl::tensor::kernels::alignment_utils::is_aligned;
        using dpctl::tensor::kernels::alignment_utils::required_alignment;
        if (is_aligned<required_alignment>(a_tp) &&
            is_aligned<required_alignment>(b_tp) &&
            is_aligned<required_alignment>(out_tp))
        {
            constexpr bool enable_sg_loadstore = true;
            using IsCloseFunc =
                IsCloseContigScalarFunctor<T, scT, resTy, vec_sz, n_vecs,
                                           enable_sg_loadstore>;

            cgh.parallel_for<IsCloseFunc>(
                sycl::nd_range<1>(gws_range, lws_range),
                IsCloseFunc(a_tp, b_tp, out_tp, nelems, rtol, atol, equal_nan));
        }
        else {
            constexpr bool disable_sg_loadstore = false;
            using IsCloseFunc =
                IsCloseContigScalarFunctor<T, scT, resTy, vec_sz, n_vecs,
                                           disable_sg_loadstore>;

            cgh.parallel_for<IsCloseFunc>(
                sycl::nd_range<1>(gws_range, lws_range),
                IsCloseFunc(a_tp, b_tp, out_tp, nelems, rtol, atol, equal_nan));
        }
    });

    return comp_ev;
}
} // namespace dpnp::kernels::isclose