2 pybind11/numpy.h: Basic NumPy support, vectorize() wrapper
4 Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>
6 All rights reserved. Use of this source code is governed by a
7 BSD-style license that can be found in the LICENSE file.
28 # pragma warning(push)
29 # pragma warning(disable: 4127) // warning C4127: Conditional expression is constant
32 /* This will be true on all flat address space platforms and allows us to reduce the
33 whole npy_intp / ssize_t / Py_intptr_t business down to just ssize_t for all size
34 and dimension types (e.g. shape, strides, indexing), instead of inflicting this
35 upon the library user. */
36 static_assert(sizeof(ssize_t) == sizeof(Py_intptr_t), "ssize_t != Py_intptr_t");
38 NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
40 class array; // Forward declaration
42 NAMESPACE_BEGIN(detail)
43 template <typename type, typename SFINAE = void> struct npy_format_descriptor;
// NOTE(review): the *_Proxy structs below appear to mirror NumPy's own C
// struct layouts (PyArray_Descr / PyArrayObject / PyVoidScalarObject) so that
// pybind11 can poke at their fields without including numpy headers — the
// field lists are truncated in this view; confirm against numpy's
// ndarraytypes.h before touching any of them (layout must match exactly).
45 struct PyArrayDescr_Proxy {
60 struct PyArray_Proxy {
71 struct PyVoidScalarObject_Proxy {
74 PyArrayDescr_Proxy *descr;
// Per-C++-type record kept in the registry: the dtype object plus a
// buffer-protocol format string (remaining fields elided in this view).
79 struct numpy_type_info {
81 std::string format_str;
84 struct numpy_internals {
85 std::unordered_map<std::type_index, numpy_type_info> registered_dtypes;
87 numpy_type_info *get_type_info(const std::type_info& tinfo, bool throw_if_missing = true) {
88 auto it = registered_dtypes.find(std::type_index(tinfo));
89 if (it != registered_dtypes.end())
92 pybind11_fail(std::string("NumPy type info missing for ") + tinfo.name());
96 template<typename T> numpy_type_info *get_type_info(bool throw_if_missing = true) {
97 return get_type_info(typeid(typename std::remove_cv<T>::type), throw_if_missing);
101 inline PYBIND11_NOINLINE void load_numpy_internals(numpy_internals* &ptr) {
102 ptr = &get_or_create_shared_data<numpy_internals>("_numpy_internals");
105 inline numpy_internals& get_numpy_internals() {
106 static numpy_internals* ptr = nullptr;
108 load_numpy_internals(ptr);
/// Trait helper: `same_size<T>::as<U>` is a bool_constant that is true exactly
/// when T and U occupy the same number of bytes.
template <typename T> struct same_size {
    template <typename U> using as = bool_constant<sizeof(T) == sizeof(U)>;
};
// Recursion terminator: no candidate matched the size of `Concrete`.
template <typename Concrete> constexpr int platform_lookup() { return -1; }

// Lookup a type according to its size, and return a value corresponding to the NumPy typenum.
// Walks the candidate list (T, Ts...) in lock-step with the value list
// (first, rest...) and yields the value paired with the first candidate whose
// sizeof equals sizeof(Concrete).
template <typename Concrete, typename T, typename... Ts, typename... Ints>
constexpr int platform_lookup(int first, Ints... rest) {
    return sizeof(Concrete) == sizeof(T) ? first : platform_lookup<Concrete, Ts...>(rest...);
}
// Trailing-underscore copies of numpy's public NPY_ARRAY_* flag bits and
// NPY_TYPES typenum enumerators, so pybind11 needs no numpy headers.
// NOTE(review): the enclosing `struct npy_api {` / `enum constants {` header
// and several enumerators (e.g. NPY_BOOL_, NPY_INT_, NPY_UINT_, which the
// platform_lookup lines below reference) are elided in this view.
126 NPY_ARRAY_C_CONTIGUOUS_ = 0x0001,
127 NPY_ARRAY_F_CONTIGUOUS_ = 0x0002,
128 NPY_ARRAY_OWNDATA_ = 0x0004,
129 NPY_ARRAY_FORCECAST_ = 0x0010,
130 NPY_ARRAY_ENSUREARRAY_ = 0x0040,
131 NPY_ARRAY_ALIGNED_ = 0x0100,
132 NPY_ARRAY_WRITEABLE_ = 0x0400,
134 NPY_BYTE_, NPY_UBYTE_,
135 NPY_SHORT_, NPY_USHORT_,
137 NPY_LONG_, NPY_ULONG_,
138 NPY_LONGLONG_, NPY_ULONGLONG_,
139 NPY_FLOAT_, NPY_DOUBLE_, NPY_LONGDOUBLE_,
140 NPY_CFLOAT_, NPY_CDOUBLE_, NPY_CLONGDOUBLE_,
142 NPY_STRING_, NPY_UNICODE_, NPY_VOID_,
143 // Platform-dependent normalization
144 NPY_INT8_ = NPY_BYTE_,
145 NPY_UINT8_ = NPY_UBYTE_,
146 NPY_INT16_ = NPY_SHORT_,
147 NPY_UINT16_ = NPY_USHORT_,
148 // `npy_common.h` defines the integer aliases. In order, it checks:
149 // NPY_BITSOF_LONG, NPY_BITSOF_LONGLONG, NPY_BITSOF_INT, NPY_BITSOF_SHORT, NPY_BITSOF_CHAR
150 // and assigns the alias to the first matching size, so we should check in this order.
151 NPY_INT32_ = platform_lookup<std::int32_t, long, int, short>(
152 NPY_LONG_, NPY_INT_, NPY_SHORT_),
153 NPY_UINT32_ = platform_lookup<std::uint32_t, unsigned long, unsigned int, unsigned short>(
154 NPY_ULONG_, NPY_UINT_, NPY_USHORT_),
155 NPY_INT64_ = platform_lookup<std::int64_t, long, long long, int>(
156 NPY_LONG_, NPY_LONGLONG_, NPY_INT_),
157 NPY_UINT64_ = platform_lookup<std::uint64_t, unsigned long, unsigned long long, unsigned int>(
158 NPY_ULONG_, NPY_ULONGLONG_, NPY_UINT_),
166 static npy_api& get() {
167 static npy_api api = lookup();
171 bool PyArray_Check_(PyObject *obj) const {
172 return (bool) PyObject_TypeCheck(obj, PyArray_Type_);
174 bool PyArrayDescr_Check_(PyObject *obj) const {
175 return (bool) PyObject_TypeCheck(obj, PyArrayDescr_Type_);
// Function-pointer / type-object slots filled in from numpy's `_ARRAY_API`
// capsule by lookup() below (see DECL_NPY_API there). The trailing underscore
// keeps the names from colliding with numpy's macros of the same name.
178 unsigned int (*PyArray_GetNDArrayCFeatureVersion_)();
179 PyObject *(*PyArray_DescrFromType_)(int);
180 PyObject *(*PyArray_NewFromDescr_)
181 (PyTypeObject *, PyObject *, int, Py_intptr_t *,
182 Py_intptr_t *, void *, int, PyObject *);
183 PyObject *(*PyArray_DescrNewFromType_)(int);
184 int (*PyArray_CopyInto_)(PyObject *, PyObject *);
185 PyObject *(*PyArray_NewCopy_)(PyObject *, int);
186 PyTypeObject *PyArray_Type_;
187 PyTypeObject *PyVoidArrType_Type_;
188 PyTypeObject *PyArrayDescr_Type_;
189 PyObject *(*PyArray_DescrFromScalar_)(PyObject *);
190 PyObject *(*PyArray_FromAny_) (PyObject *, PyObject *, int, int, int, PyObject *);
191 int (*PyArray_DescrConverter_) (PyObject *, PyObject **);
192 bool (*PyArray_EquivTypes_) (PyObject *, PyObject *);
193 int (*PyArray_GetArrayParamsFromObject_)(PyObject *, PyObject *, char, PyObject **, int *,
194 Py_ssize_t *, PyObject **, PyObject *);
195 PyObject *(*PyArray_Squeeze_)(PyObject *);
196 int (*PyArray_SetBaseObject_)(PyObject *, PyObject *);
197 PyObject* (*PyArray_Resize_)(PyObject*, PyArray_Dims*, int, int);
// Indices of each entry in the `_ARRAY_API` pointer table (used as
// `api_ptr[API_##Func]` in lookup()). These values must match the ordering
// of numpy's generated __multiarray_api table — do not renumber.
200 API_PyArray_GetNDArrayCFeatureVersion = 211,
201 API_PyArray_Type = 2,
202 API_PyArrayDescr_Type = 3,
203 API_PyVoidArrType_Type = 39,
204 API_PyArray_DescrFromType = 45,
205 API_PyArray_DescrFromScalar = 57,
206 API_PyArray_FromAny = 69,
207 API_PyArray_Resize = 80,
208 API_PyArray_CopyInto = 82,
209 API_PyArray_NewCopy = 85,
210 API_PyArray_NewFromDescr = 94,
211 API_PyArray_DescrNewFromType = 9,
212 API_PyArray_DescrConverter = 174,
213 API_PyArray_EquivTypes = 182,
214 API_PyArray_GetArrayParamsFromObject = 278,
215 API_PyArray_Squeeze = 136,
216 API_PyArray_SetBaseObject = 282
219 static npy_api lookup() {
220 module m = module::import("numpy.core.multiarray");
221 auto c = m.attr("_ARRAY_API");
222 #if PY_MAJOR_VERSION >= 3
223 void **api_ptr = (void **) PyCapsule_GetPointer(c.ptr(), NULL);
225 void **api_ptr = (void **) PyCObject_AsVoidPtr(c.ptr());
228 #define DECL_NPY_API(Func) api.Func##_ = (decltype(api.Func##_)) api_ptr[API_##Func];
229 DECL_NPY_API(PyArray_GetNDArrayCFeatureVersion);
230 if (api.PyArray_GetNDArrayCFeatureVersion_() < 0x7)
231 pybind11_fail("pybind11 numpy support requires numpy >= 1.7.0");
232 DECL_NPY_API(PyArray_Type);
233 DECL_NPY_API(PyVoidArrType_Type);
234 DECL_NPY_API(PyArrayDescr_Type);
235 DECL_NPY_API(PyArray_DescrFromType);
236 DECL_NPY_API(PyArray_DescrFromScalar);
237 DECL_NPY_API(PyArray_FromAny);
238 DECL_NPY_API(PyArray_Resize);
239 DECL_NPY_API(PyArray_CopyInto);
240 DECL_NPY_API(PyArray_NewCopy);
241 DECL_NPY_API(PyArray_NewFromDescr);
242 DECL_NPY_API(PyArray_DescrNewFromType);
243 DECL_NPY_API(PyArray_DescrConverter);
244 DECL_NPY_API(PyArray_EquivTypes);
245 DECL_NPY_API(PyArray_GetArrayParamsFromObject);
246 DECL_NPY_API(PyArray_Squeeze);
247 DECL_NPY_API(PyArray_SetBaseObject);
253 inline PyArray_Proxy* array_proxy(void* ptr) {
254 return reinterpret_cast<PyArray_Proxy*>(ptr);
257 inline const PyArray_Proxy* array_proxy(const void* ptr) {
258 return reinterpret_cast<const PyArray_Proxy*>(ptr);
261 inline PyArrayDescr_Proxy* array_descriptor_proxy(PyObject* ptr) {
262 return reinterpret_cast<PyArrayDescr_Proxy*>(ptr);
265 inline const PyArrayDescr_Proxy* array_descriptor_proxy(const PyObject* ptr) {
266 return reinterpret_cast<const PyArrayDescr_Proxy*>(ptr);
269 inline bool check_flags(const void* ptr, int flag) {
270 return (flag == (array_proxy(ptr)->flags & flag));
// Trait: detects std::array<T, N> for any T and N.
template <typename> struct is_std_array : std::false_type { };
template <typename T, size_t N> struct is_std_array<std::array<T, N>> : std::true_type { };

// Trait: detects std::complex<T> for any T.
template <typename> struct is_complex : std::false_type { };
template <typename T> struct is_complex<std::complex<T>> : std::true_type { };
278 template <typename T> struct array_info_scalar {
280 static constexpr bool is_array = false;
281 static constexpr bool is_empty = false;
282 static constexpr auto extents = _("");
283 static void append_extents(list& /* shape */) { }
285 // Computes underlying type and a comma-separated list of extents for array
286 // types (any mix of std::array and built-in arrays). An array of char is
287 // treated as scalar because it gets special handling.
288 template <typename T> struct array_info : array_info_scalar<T> { };
289 template <typename T, size_t N> struct array_info<std::array<T, N>> {
290 using type = typename array_info<T>::type;
291 static constexpr bool is_array = true;
292 static constexpr bool is_empty = (N == 0) || array_info<T>::is_empty;
293 static constexpr size_t extent = N;
295 // appends the extents to shape
296 static void append_extents(list& shape) {
298 array_info<T>::append_extents(shape);
301 static constexpr auto extents = _<array_info<T>::is_array>(
302 concat(_<N>(), array_info<T>::extents), _<N>()
305 // For numpy we have special handling for arrays of characters, so we don't include
306 // the size in the array extents.
307 template <size_t N> struct array_info<char[N]> : array_info_scalar<char[N]> { };
308 template <size_t N> struct array_info<std::array<char, N>> : array_info_scalar<std::array<char, N>> { };
309 template <typename T, size_t N> struct array_info<T[N]> : array_info<std::array<T, N>> { };
310 template <typename T> using remove_all_extents_t = typename array_info<T>::type;
312 template <typename T> using is_pod_struct = all_of<
313 std::is_standard_layout<T>, // since we're accessing directly in memory we need a standard layout type
314 #if !defined(__GNUG__) || defined(_LIBCPP_VERSION) || defined(_GLIBCXX_USE_CXX11_ABI)
315 // _GLIBCXX_USE_CXX11_ABI indicates that we're using libstdc++ from GCC 5 or newer, independent
316 // of the actual compiler (Clang can also use libstdc++, but it always defines __GNUC__ == 4).
317 std::is_trivially_copyable<T>,
319 // GCC 4 doesn't implement is_trivially_copyable, so approximate it
320 std::is_trivially_destructible<T>,
321 satisfies_any_of<T, std::has_trivial_copy_constructor, std::has_trivial_copy_assign>,
323 satisfies_none_of<T, std::is_reference, std::is_array, is_std_array, std::is_arithmetic, is_complex, std::is_enum>
// Base case: index pack exhausted, nothing left to add.
template <ssize_t Dim = 0, typename Strides> ssize_t byte_offset_unsafe(const Strides &) { return 0; }
// Folds i0*strides[0] + i1*strides[1] + ... over the index pack, consuming one
// dimension per recursion step. No bounds checking whatsoever ("unsafe").
template <ssize_t Dim = 0, typename Strides, typename... Ix>
ssize_t byte_offset_unsafe(const Strides &strides, ssize_t i, Ix... index) {
    return i * strides[Dim] + byte_offset_unsafe<Dim + 1>(strides, index...);
}
// NOTE(review): in this view of the file several member declarations (the
// shape_/strides_/dims_ fields, access specifiers) and closing braces of this
// class are elided — do not edit structurally without the full upstream text.
333  * Proxy class providing unsafe, unchecked const access to array data. This is constructed through
334  * the `unchecked<T, N>()` method of `array` or the `unchecked<N>()` method of `array_t<T>`. `Dims`
335  * will be -1 for dimensions determined at runtime.
337 template <typename T, ssize_t Dims>
338 class unchecked_reference {
340 static constexpr bool Dynamic = Dims < 0;
341 const unsigned char *data_;
342 // Storing the shape & strides in local variables (i.e. these arrays) allows the compiler to
343 // make large performance gains on big, nested loops, but requires compile-time dimensions
344 conditional_t<Dynamic, const ssize_t *, std::array<ssize_t, (size_t) Dims>>
348 friend class pybind11::array;
349 // Constructor for compile-time dimensions:
350 template <bool Dyn = Dynamic>
351 unchecked_reference(const void *data, const ssize_t *shape, const ssize_t *strides, enable_if_t<!Dyn, ssize_t>)
352 : data_{reinterpret_cast<const unsigned char *>(data)}, dims_{Dims} {
// Copies shape/strides into the fixed-size local arrays (see comment above).
353 for (size_t i = 0; i < (size_t) dims_; i++) {
354 shape_[i] = shape[i];
355 strides_[i] = strides[i];
358 // Constructor for runtime dimensions:
359 template <bool Dyn = Dynamic>
360 unchecked_reference(const void *data, const ssize_t *shape, const ssize_t *strides, enable_if_t<Dyn, ssize_t> dims)
361 : data_{reinterpret_cast<const unsigned char *>(data)}, shape_{shape}, strides_{strides}, dims_{dims} {}
365  * Unchecked const reference access to data at the given indices. For a compile-time known
366  * number of dimensions, this requires the correct number of arguments; for run-time
367  * dimensionality, this is not checked (and so is up to the caller to use safely).
369 template <typename... Ix> const T &operator()(Ix... index) const {
370 static_assert(ssize_t{sizeof...(Ix)} == Dims || Dynamic,
371 "Invalid number of indices for unchecked array reference");
372 return *reinterpret_cast<const T *>(data_ + byte_offset_unsafe(strides_, ssize_t(index)...));
375  * Unchecked const reference access to data; this operator only participates if the reference
376  * is to a 1-dimensional array. When present, this is exactly equivalent to `obj(index)`.
378 template <ssize_t D = Dims, typename = enable_if_t<D == 1 || Dynamic>>
379 const T &operator[](ssize_t index) const { return operator()(index); }
381 /// Pointer access to the data at the given indices.
382 template <typename... Ix> const T *data(Ix... ix) const { return &operator()(ssize_t(ix)...); }
384 /// Returns the item size, i.e. sizeof(T)
385 constexpr static ssize_t itemsize() { return sizeof(T); }
387 /// Returns the shape (i.e. size) of dimension `dim`
388 ssize_t shape(ssize_t dim) const { return shape_[(size_t) dim]; }
390 /// Returns the number of dimensions of the array
391 ssize_t ndim() const { return dims_; }
393 /// Returns the total number of elements in the referenced array, i.e. the product of the shapes
// Static-dims flavor: shape_ is a std::array, so use its iterators.
394 template <bool Dyn = Dynamic>
395 enable_if_t<!Dyn, ssize_t> size() const {
396 return std::accumulate(shape_.begin(), shape_.end(), (ssize_t) 1, std::multiplies<ssize_t>());
// Dynamic flavor: shape_ is a raw pointer, so use pointer arithmetic.
398 template <bool Dyn = Dynamic>
399 enable_if_t<Dyn, ssize_t> size() const {
400 return std::accumulate(shape_, shape_ + ndim(), (ssize_t) 1, std::multiplies<ssize_t>());
403 /// Returns the total number of bytes used by the referenced data. Note that the actual span in
404 /// memory may be larger if the referenced array has non-contiguous strides (e.g. for a slice).
405 ssize_t nbytes() const {
406 return size() * itemsize();
// Mutable counterpart of unchecked_reference: inherits the const machinery and
// const_casts the results. NOTE(review): access specifiers and closing braces
// are elided in this view.
410 template <typename T, ssize_t Dims>
411 class unchecked_mutable_reference : public unchecked_reference<T, Dims> {
412 friend class pybind11::array;
413 using ConstBase = unchecked_reference<T, Dims>;
414 using ConstBase::ConstBase;
415 using ConstBase::Dynamic;
417 /// Mutable, unchecked access to data at the given indices.
418 template <typename... Ix> T& operator()(Ix... index) {
419 static_assert(ssize_t{sizeof...(Ix)} == Dims || Dynamic,
420 "Invalid number of indices for unchecked array reference");
// Safe const_cast: this proxy is only handed out for writeable arrays.
421 return const_cast<T &>(ConstBase::operator()(index...));
424  * Mutable, unchecked access data at the given index; this operator only participates if the
425  * reference is to a 1-dimensional array (or has runtime dimensions). When present, this is
426  * exactly equivalent to `obj(index)`.
428 template <ssize_t D = Dims, typename = enable_if_t<D == 1 || Dynamic>>
429 T &operator[](ssize_t index) { return operator()(index); }
431 /// Mutable pointer access to the data at the given indices.
432 template <typename... Ix> T *mutable_data(Ix... ix) { return &operator()(ssize_t(ix)...); }
435 template <typename T, ssize_t Dim>
436 struct type_caster<unchecked_reference<T, Dim>> {
437 static_assert(Dim == 0 && Dim > 0 /* always fail */, "unchecked array proxy object is not castable");
439 template <typename T, ssize_t Dim>
440 struct type_caster<unchecked_mutable_reference<T, Dim>> : type_caster<unchecked_reference<T, Dim>> {};
442 NAMESPACE_END(detail)
// Wrapper around numpy.dtype. NOTE(review): several lines of this class
// (closing braces, the `private:` specifier, parts of some bodies) are elided
// in this view; edit only against the full upstream text.
444 class dtype : public object {
446 PYBIND11_OBJECT_DEFAULT(dtype, object, detail::npy_api::get().PyArrayDescr_Check_);
// Build a dtype from a buffer_info by parsing its PEP 3118 format string.
448 explicit dtype(const buffer_info &info) {
449 dtype descr(_dtype_from_pep3118()(PYBIND11_STR_TYPE(info.format)));
450 // If info.itemsize == 0, use the value calculated from the format string
451 m_ptr = descr.strip_padding(info.itemsize ? info.itemsize : descr.itemsize()).release().ptr();
// Build a dtype from a format string, e.g. "i4" (delegates to numpy's converter).
454 explicit dtype(const std::string &format) {
455 m_ptr = from_args(pybind11::str(format)).release().ptr();
458 dtype(const char *format) : dtype(std::string(format)) { }
// Structured dtype from parallel names/formats/offsets lists plus total size.
460 dtype(list names, list formats, list offsets, ssize_t itemsize) {
462 args["names"] = names;
463 args["formats"] = formats;
464 args["offsets"] = offsets;
465 args["itemsize"] = pybind11::int_(itemsize);
466 m_ptr = from_args(args).release().ptr();
469 /// This is essentially the same as calling numpy.dtype(args) in Python.
470 static dtype from_args(object args) {
471 PyObject *ptr = nullptr;
472 if (!detail::npy_api::get().PyArray_DescrConverter_(args.ptr(), &ptr) || !ptr)
473 throw error_already_set();
474 return reinterpret_steal<dtype>(ptr);
477 /// Return dtype associated with a C++ type.
478 template <typename T> static dtype of() {
479 return detail::npy_format_descriptor<typename std::remove_cv<T>::type>::dtype();
482 /// Size of the data type in bytes.
483 ssize_t itemsize() const {
484 return detail::array_descriptor_proxy(m_ptr)->elsize;
487 /// Returns true for structured data types.
488 bool has_fields() const {
489 return detail::array_descriptor_proxy(m_ptr)->names != nullptr;
492 /// Single-character type code.
494 return detail::array_descriptor_proxy(m_ptr)->kind;
// Cached import of numpy's private PEP-3118 parser helper.
498 static object _dtype_from_pep3118() {
499 static PyObject *obj = module::import("numpy.core._internal")
500 .attr("_dtype_from_pep3118").cast<object>().release().ptr();
501 return reinterpret_borrow<object>(obj);
504 dtype strip_padding(ssize_t itemsize) {
505 // Recursively strip all void fields with empty names that are generated for
506 // padding fields (as of NumPy v1.11).
510 struct field_descr { PYBIND11_STR_TYPE name; object format; pybind11::int_ offset; };
511 std::vector<field_descr> field_descriptors;
// Walk the structured dtype's fields dict: (name -> (format, offset)).
513 for (auto field : attr("fields").attr("items")()) {
514 auto spec = field.cast<tuple>();
515 auto name = spec[0].cast<pybind11::str>();
516 auto format = spec[1].cast<tuple>()[0].cast<dtype>();
517 auto offset = spec[1].cast<tuple>()[1].cast<pybind11::int_>();
// Unnamed void fields are numpy-generated padding: drop them (recursing
// into nested structured formats via strip_padding below).
518 if (!len(name) && format.kind() == 'V')
520 field_descriptors.push_back({(PYBIND11_STR_TYPE) name, format.strip_padding(format.itemsize()), offset});
// Rebuild the dtype with fields sorted by offset.
523 std::sort(field_descriptors.begin(), field_descriptors.end(),
524 [](const field_descr& a, const field_descr& b) {
525 return a.offset.cast<int>() < b.offset.cast<int>();
528 list names, formats, offsets;
529 for (auto& descr : field_descriptors) {
530 names.append(descr.name);
531 formats.append(descr.format);
532 offsets.append(descr.offset);
534 return dtype(names, formats, offsets, itemsize);
// Wrapper around numpy.ndarray. NOTE(review): many lines of this class
// (closing braces, `public:`/`protected:` specifiers, a few statements such as
// the flags declaration and some returns) are elided in this view; structural
// edits must be made against the full upstream text.
538 class array : public buffer {
540 PYBIND11_OBJECT_CVT(array, buffer, detail::npy_api::get().PyArray_Check_, raw_array)
// Flag aliases exposed to users (values mirror numpy's NPY_ARRAY_* bits).
543 c_style = detail::npy_api::NPY_ARRAY_C_CONTIGUOUS_,
544 f_style = detail::npy_api::NPY_ARRAY_F_CONTIGUOUS_,
545 forcecast = detail::npy_api::NPY_ARRAY_FORCECAST_
// Default: a 1-d float64 array of size 0.
548 array() : array({{0}}, static_cast<const double *>(nullptr)) {}
550 using ShapeContainer = detail::any_container<ssize_t>;
551 using StridesContainer = detail::any_container<ssize_t>;
553 // Constructs an array taking shape/strides from arbitrary container types
554 array(const pybind11::dtype &dt, ShapeContainer shape, StridesContainer strides,
555 const void *ptr = nullptr, handle base = handle()) {
// Empty strides means "use default C-order strides for this shape".
557 if (strides->empty())
558 *strides = c_strides(*shape, dt.itemsize());
560 auto ndim = shape->size();
561 if (ndim != strides->size())
562 pybind11_fail("NumPy: shape ndim doesn't match strides ndim");
567 if (isinstance<array>(base))
568 /* Copy flags from base (except ownership bit) */
569 flags = reinterpret_borrow<array>(base).flags() & ~detail::npy_api::NPY_ARRAY_OWNDATA_;
571 /* Writable by default, easy to downgrade later on if needed */
572 flags = detail::npy_api::NPY_ARRAY_WRITEABLE_;
575 auto &api = detail::npy_api::get();
576 auto tmp = reinterpret_steal<object>(api.PyArray_NewFromDescr_(
577 api.PyArray_Type_, descr.release().ptr(), (int) ndim, shape->data(), strides->data(),
578 const_cast<void *>(ptr), flags, nullptr));
580 throw error_already_set();
// With a base: the new array is a view keeping `base` alive. Without one
// (but with data): numpy doesn't own ptr, so snapshot it with NewCopy.
583 api.PyArray_SetBaseObject_(tmp.ptr(), base.inc_ref().ptr());
585 tmp = reinterpret_steal<object>(api.PyArray_NewCopy_(tmp.ptr(), -1 /* any order */));
588 m_ptr = tmp.release().ptr();
// Convenience constructors delegating to the main one above.
591 array(const pybind11::dtype &dt, ShapeContainer shape, const void *ptr = nullptr, handle base = handle())
592 : array(dt, std::move(shape), {}, ptr, base) { }
594 template <typename T, typename = detail::enable_if_t<std::is_integral<T>::value && !std::is_same<bool, T>::value>>
595 array(const pybind11::dtype &dt, T count, const void *ptr = nullptr, handle base = handle())
596 : array(dt, {{count}}, ptr, base) { }
598 template <typename T>
599 array(ShapeContainer shape, StridesContainer strides, const T *ptr, handle base = handle())
600 : array(pybind11::dtype::of<T>(), std::move(shape), std::move(strides), ptr, base) { }
602 template <typename T>
603 array(ShapeContainer shape, const T *ptr, handle base = handle())
604 : array(std::move(shape), {}, ptr, base) { }
606 template <typename T>
607 explicit array(ssize_t count, const T *ptr, handle base = handle()) : array({count}, {}, ptr, base) { }
609 explicit array(const buffer_info &info)
610 : array(pybind11::dtype(info), info.shape, info.strides, info.ptr) { }
612 /// Array descriptor (dtype)
613 pybind11::dtype dtype() const {
614 return reinterpret_borrow<pybind11::dtype>(detail::array_proxy(m_ptr)->descr);
617 /// Total number of elements
618 ssize_t size() const {
619 return std::accumulate(shape(), shape() + ndim(), (ssize_t) 1, std::multiplies<ssize_t>());
622 /// Byte size of a single element
623 ssize_t itemsize() const {
624 return detail::array_descriptor_proxy(detail::array_proxy(m_ptr)->descr)->elsize;
627 /// Total number of bytes
628 ssize_t nbytes() const {
629 return size() * itemsize();
632 /// Number of dimensions
633 ssize_t ndim() const {
634 return detail::array_proxy(m_ptr)->nd;
// Object that owns this array's data (may be null).
638 object base() const {
639 return reinterpret_borrow<object>(detail::array_proxy(m_ptr)->base);
642 /// Dimensions of the array
643 const ssize_t* shape() const {
644 return detail::array_proxy(m_ptr)->dimensions;
647 /// Dimension along a given axis
648 ssize_t shape(ssize_t dim) const {
650 fail_dim_check(dim, "invalid axis");
654 /// Strides of the array
655 const ssize_t* strides() const {
656 return detail::array_proxy(m_ptr)->strides;
659 /// Stride along a given axis
660 ssize_t strides(ssize_t dim) const {
662 fail_dim_check(dim, "invalid axis");
663 return strides()[dim];
666 /// Return the NumPy array flags
668 return detail::array_proxy(m_ptr)->flags;
671 /// If set, the array is writeable (otherwise the buffer is read-only)
672 bool writeable() const {
673 return detail::check_flags(m_ptr, detail::npy_api::NPY_ARRAY_WRITEABLE_);
676 /// If set, the array owns the data (will be freed when the array is deleted)
677 bool owndata() const {
678 return detail::check_flags(m_ptr, detail::npy_api::NPY_ARRAY_OWNDATA_);
681 /// Pointer to the contained data. If index is not provided, points to the
682 /// beginning of the buffer. May throw if the index would lead to out of bounds access.
683 template<typename... Ix> const void* data(Ix... index) const {
684 return static_cast<const void *>(detail::array_proxy(m_ptr)->data + offset_at(index...));
687 /// Mutable pointer to the contained data. If index is not provided, points to the
688 /// beginning of the buffer. May throw if the index would lead to out of bounds access.
689 /// May throw if the array is not writeable.
690 template<typename... Ix> void* mutable_data(Ix... index) {
692 return static_cast<void *>(detail::array_proxy(m_ptr)->data + offset_at(index...));
695 /// Byte offset from beginning of the array to a given index (full or partial).
696 /// May throw if the index would lead to out of bounds access.
697 template<typename... Ix> ssize_t offset_at(Ix... index) const {
698 if ((ssize_t) sizeof...(index) > ndim())
699 fail_dim_check(sizeof...(index), "too many indices for an array");
700 return byte_offset(ssize_t(index)...);
703 ssize_t offset_at() const { return 0; }
705 /// Item count from beginning of the array to a given index (full or partial).
706 /// May throw if the index would lead to out of bounds access.
707 template<typename... Ix> ssize_t index_at(Ix... index) const {
708 return offset_at(index...) / itemsize();
712  * Returns a proxy object that provides access to the array's data without bounds or
713  * dimensionality checking. Will throw if the array is missing the `writeable` flag. Use with
714  * care: the array must not be destroyed or reshaped for the duration of the returned object,
715  * and the caller must take care not to access invalid dimensions or dimension indices.
717 template <typename T, ssize_t Dims = -1> detail::unchecked_mutable_reference<T, Dims> mutable_unchecked() & {
718 if (Dims >= 0 && ndim() != Dims)
719 throw std::domain_error("array has incorrect number of dimensions: " + std::to_string(ndim()) +
720 "; expected " + std::to_string(Dims));
721 return detail::unchecked_mutable_reference<T, Dims>(mutable_data(), shape(), strides(), ndim());
725  * Returns a proxy object that provides const access to the array's data without bounds or
726  * dimensionality checking. Unlike `mutable_unchecked()`, this does not require that the
727  * underlying array have the `writable` flag. Use with care: the array must not be destroyed or
728  * reshaped for the duration of the returned object, and the caller must take care not to access
729  * invalid dimensions or dimension indices.
731 template <typename T, ssize_t Dims = -1> detail::unchecked_reference<T, Dims> unchecked() const & {
732 if (Dims >= 0 && ndim() != Dims)
733 throw std::domain_error("array has incorrect number of dimensions: " + std::to_string(ndim()) +
734 "; expected " + std::to_string(Dims));
735 return detail::unchecked_reference<T, Dims>(data(), shape(), strides(), ndim());
738 /// Return a new view with all of the dimensions of length 1 removed
740 auto& api = detail::npy_api::get();
741 return reinterpret_steal<array>(api.PyArray_Squeeze_(m_ptr));
744 /// Resize array to given shape
745 /// If refcheck is true and more that one reference exist to this array
746 /// then resize will succeed only if it makes a reshape, i.e. original size doesn't change
747 void resize(ShapeContainer new_shape, bool refcheck = true) {
748 detail::npy_api::PyArray_Dims d = {
749 new_shape->data(), int(new_shape->size())
751 // try to resize, set ordering param to -1 cause it's not used anyway
752 object new_array = reinterpret_steal<object>(
753 detail::npy_api::get().PyArray_Resize_(m_ptr, &d, int(refcheck), -1)
755 if (!new_array) throw error_already_set();
// PyArray_Resize may return None (in-place resize) or a new array object.
756 if (isinstance<array>(new_array)) { *this = std::move(new_array); }
759 /// Ensure that the argument is a NumPy array
760 /// In case of an error, nullptr is returned and the Python error is cleared.
761 static array ensure(handle h, int ExtraFlags = 0) {
762 auto result = reinterpret_steal<array>(raw_array(h.ptr(), ExtraFlags));
769 template<typename, typename> friend struct detail::npy_format_descriptor;
771 void fail_dim_check(ssize_t dim, const std::string& msg) const {
772 throw index_error(msg + ": " + std::to_string(dim) +
773 " (ndim = " + std::to_string(ndim()) + ")");
// Bounds-checked byte offset (contrast with detail::byte_offset_unsafe).
776 template<typename... Ix> ssize_t byte_offset(Ix... index) const {
777 check_dimensions(index...);
778 return detail::byte_offset_unsafe(strides(), ssize_t(index)...);
781 void check_writeable() const {
783 throw std::domain_error("array is not writeable");
786 // Default, C-style strides
787 static std::vector<ssize_t> c_strides(const std::vector<ssize_t> &shape, ssize_t itemsize) {
788 auto ndim = shape.size();
789 std::vector<ssize_t> strides(ndim, itemsize);
// Row-major: last axis has stride == itemsize; accumulate right-to-left.
791 for (size_t i = ndim - 1; i > 0; --i)
792 strides[i - 1] = strides[i] * shape[i];
796 // F-style strides; default when constructing an array_t with `ExtraFlags & f_style`
797 static std::vector<ssize_t> f_strides(const std::vector<ssize_t> &shape, ssize_t itemsize) {
798 auto ndim = shape.size();
799 std::vector<ssize_t> strides(ndim, itemsize);
// Column-major: first axis has stride == itemsize; accumulate left-to-right.
800 for (size_t i = 1; i < ndim; ++i)
801 strides[i] = strides[i - 1] * shape[i - 1];
805 template<typename... Ix> void check_dimensions(Ix... index) const {
806 check_dimensions_impl(ssize_t(0), shape(), ssize_t(index)...);
809 void check_dimensions_impl(ssize_t, const ssize_t*) const { }
// Recursively validates one index per axis, throwing on out-of-bounds.
811 template<typename... Ix> void check_dimensions_impl(ssize_t axis, const ssize_t* shape, ssize_t i, Ix... index) const {
813 throw index_error(std::string("index ") + std::to_string(i) +
814 " is out of bounds for axis " + std::to_string(axis) +
815 " with size " + std::to_string(*shape));
817 check_dimensions_impl(axis + 1, shape + 1, index...);
820 /// Create array from any object -- always returns a new reference
821 static PyObject *raw_array(PyObject *ptr, int ExtraFlags = 0) {
822 if (ptr == nullptr) {
823 PyErr_SetString(PyExc_ValueError, "cannot create a pybind11::array from a nullptr");
826 return detail::npy_api::get().PyArray_FromAny_(
827 ptr, nullptr, 0, 0, detail::npy_api::NPY_ARRAY_ENSUREARRAY_ | ExtraFlags, nullptr);
831 template <typename T, int ExtraFlags = array::forcecast> class array_t : public array {
833 struct private_ctor {};
834 // Delegating constructor needed when both moving and accessing in the same constructor
835 array_t(private_ctor, ShapeContainer &&shape, StridesContainer &&strides, const T *ptr, handle base)
836 : array(std::move(shape), std::move(strides), ptr, base) {}
838 static_assert(!detail::array_info<T>::is_array, "Array types cannot be used with array_t");
840 using value_type = T;
842 array_t() : array(0, static_cast<const T *>(nullptr)) {}
843 array_t(handle h, borrowed_t) : array(h, borrowed_t{}) { }
844 array_t(handle h, stolen_t) : array(h, stolen_t{}) { }
846 PYBIND11_DEPRECATED("Use array_t<T>::ensure() instead")
847 array_t(handle h, bool is_borrowed) : array(raw_array_t(h.ptr()), stolen_t{}) {
848 if (!m_ptr) PyErr_Clear();
849 if (!is_borrowed) Py_XDECREF(h.ptr());
852 array_t(const object &o) : array(raw_array_t(o.ptr()), stolen_t{}) {
853 if (!m_ptr) throw error_already_set();
856 explicit array_t(const buffer_info& info) : array(info) { }
858 array_t(ShapeContainer shape, StridesContainer strides, const T *ptr = nullptr, handle base = handle())
859 : array(std::move(shape), std::move(strides), ptr, base) { }
861 explicit array_t(ShapeContainer shape, const T *ptr = nullptr, handle base = handle())
862 : array_t(private_ctor{}, std::move(shape),
863 ExtraFlags & f_style ? f_strides(*shape, itemsize()) : c_strides(*shape, itemsize()),
866 explicit array_t(size_t count, const T *ptr = nullptr, handle base = handle())
867 : array({count}, {}, ptr, base) { }
869 constexpr ssize_t itemsize() const {
873 template<typename... Ix> ssize_t index_at(Ix... index) const {
874 return offset_at(index...) / itemsize();
877 template<typename... Ix> const T* data(Ix... index) const {
878 return static_cast<const T*>(array::data(index...));
881 template<typename... Ix> T* mutable_data(Ix... index) {
882 return static_cast<T*>(array::mutable_data(index...));
885 // Reference to element at a given index
886 template<typename... Ix> const T& at(Ix... index) const {
887 if ((ssize_t) sizeof...(index) != ndim())
888 fail_dim_check(sizeof...(index), "index dimension mismatch");
889 return *(static_cast<const T*>(array::data()) + byte_offset(ssize_t(index)...) / itemsize());
892 // Mutable reference to element at a given index
893 template<typename... Ix> T& mutable_at(Ix... index) {
894 if ((ssize_t) sizeof...(index) != ndim())
895 fail_dim_check(sizeof...(index), "index dimension mismatch");
896 return *(static_cast<T*>(array::mutable_data()) + byte_offset(ssize_t(index)...) / itemsize());
900 * Returns a proxy object that provides access to the array's data without bounds or
901 * dimensionality checking. Will throw if the array is missing the `writeable` flag. Use with
902 * care: the array must not be destroyed or reshaped for the duration of the returned object,
903 * and the caller must take care not to access invalid dimensions or dimension indices.
905 template <ssize_t Dims = -1> detail::unchecked_mutable_reference<T, Dims> mutable_unchecked() & {
906 return array::mutable_unchecked<T, Dims>();
910 * Returns a proxy object that provides const access to the array's data without bounds or
911 * dimensionality checking. Unlike `mutable_unchecked()`, this does not require that the
912 * underlying array have the `writeable` flag. Use with care: the array must not be destroyed
913 * or reshaped for the duration of the returned object, and the caller must take care not to
914 * access invalid dimensions or dimension indices.
916 template <ssize_t Dims = -1> detail::unchecked_reference<T, Dims> unchecked() const & {
917 return array::unchecked<T, Dims>();
920 /// Ensure that the argument is a NumPy array of the correct dtype (and if not, try to convert
921 /// it). In case of an error, nullptr is returned and the Python error is cleared.
922 static array_t ensure(handle h) {
923 auto result = reinterpret_steal<array_t>(raw_array_t(h.ptr()));
// Exact-type test: `h` must already be a NumPy array whose descriptor is
// equivalent to dtype::of<T>(); no conversion is attempted here.
929 static bool check_(handle h) {
930 const auto &api = detail::npy_api::get();
931 return api.PyArray_Check_(h.ptr())
932 && api.PyArray_EquivTypes_(detail::array_proxy(h.ptr())->descr, dtype::of<T>().ptr());
936 /// Create array from any object -- always returns a new reference
// On a null input, a ValueError is set for the caller to observe or clear.
937 static PyObject *raw_array_t(PyObject *ptr) {
938 if (ptr == nullptr) {
939 PyErr_SetString(PyExc_ValueError, "cannot create a pybind11::array_t from a nullptr");
// PyArray_FromAny steals a reference to the descriptor — hence release().
// ENSUREARRAY guarantees a real ndarray (not a subclass); ExtraFlags may
// additionally request forcecast / c_style / f_style behavior.
942 return detail::npy_api::get().PyArray_FromAny_(
943 ptr, dtype::of<T>().release().ptr(), 0, 0,
944 detail::npy_api::NPY_ARRAY_ENSUREARRAY_ | ExtraFlags, nullptr);
// format_descriptor for POD struct types registered as NumPy structured
// dtypes: the buffer-protocol format string comes from the registered
// npy_format_descriptor (see register_structured_dtype below).
948 template <typename T>
949 struct format_descriptor<T, detail::enable_if_t<detail::is_pod_struct<T>::value>> {
950 static std::string format() {
951 return detail::npy_format_descriptor<typename std::remove_cv<T>::type>::format();
955 template <size_t N> struct format_descriptor<char[N]> {
956 static std::string format() { return std::to_string(N) + "s"; }
958 template <size_t N> struct format_descriptor<std::array<char, N>> {
959 static std::string format() { return std::to_string(N) + "s"; }
// Enums are formatted as their underlying integral type.
962 template <typename T>
963 struct format_descriptor<T, detail::enable_if_t<std::is_enum<T>::value>> {
964 static std::string format() {
965 return format_descriptor<
966 typename std::remove_cv<typename std::underlying_type<T>::type>::type>::format();
// Built-in C arrays: prefix the element format with the extents in
// parentheses, e.g. a float[2][3] yields "(2,3)" + float's format.
970 template <typename T>
971 struct format_descriptor<T, detail::enable_if_t<detail::array_info<T>::is_array>> {
972 static std::string format() {
973 using namespace detail;
974 static constexpr auto extents = _("(") + array_info<T>::extents + _(")");
975 return extents.text + format_descriptor<remove_all_extents_t<T>>::format();
979 NAMESPACE_BEGIN(detail)
// Type caster for array_t<T, ExtraFlags>: accepts any object convertible to
// a NumPy array of the requested dtype (conversion only when `convert`).
980 template <typename T, int ExtraFlags>
981 struct pyobject_caster<array_t<T, ExtraFlags>> {
982 using type = array_t<T, ExtraFlags>;
984 bool load(handle src, bool convert) {
// Without implicit conversion, require an exact array/dtype match.
985 if (!convert && !type::check_(src))
987 value = type::ensure(src);
// ensure() yields a null handle on failure (with the error cleared).
988 return static_cast<bool>(value);
// Casting back to Python is simply another reference to the same array.
991 static handle cast(const handle &src, return_value_policy /* policy */, handle /* parent */) {
992 return src.inc_ref();
994 PYBIND11_TYPE_CASTER(type, handle_type_name<type>::name);
// Buffer compatibility for registered POD struct types: compare the buffer's
// dtype with the registered dtype via NumPy's own equivalence test instead
// of comparing format strings textually.
997 template <typename T>
998 struct compare_buffer_info<T, detail::enable_if_t<detail::is_pod_struct<T>::value>> {
999 static bool compare(const buffer_info& b) {
1000 return npy_api::get().PyArray_EquivTypes_(dtype::of<T>().ptr(), dtype(b).ptr());
// Compile-time NumPy type-name strings ("int32", "float64", ...) used when
// rendering function signatures.
1004 template <typename T, typename = void>
1005 struct npy_format_descriptor_name;
1007 template <typename T>
1008 struct npy_format_descriptor_name<T, enable_if_t<std::is_integral<T>::value>> {
// "bool"; otherwise "int<bits>" / "uint<bits>" by signedness and width.
1009 static constexpr auto name = _<std::is_same<T, bool>::value>(
1010 _("bool"), _<std::is_signed<T>::value>("int", "uint") + _<sizeof(T)*8>()
1014 template <typename T>
1015 struct npy_format_descriptor_name<T, enable_if_t<std::is_floating_point<T>::value>> {
// float/double become "float32"/"float64"; any other floating type (long
// double) has a platform-dependent width and is named "longdouble".
1016 static constexpr auto name = _<std::is_same<T, float>::value || std::is_same<T, double>::value>(
1017 _("float") + _<sizeof(T)*8>(), _("longdouble")
1021 template <typename T>
1022 struct npy_format_descriptor_name<T, enable_if_t<is_complex<T>::value>> {
// Total bit width is twice the component width, hence sizeof(value_type)*16:
// complex<float> -> "complex64", complex<double> -> "complex128".
1023 static constexpr auto name = _<std::is_same<typename T::value_type, float>::value
1024 || std::is_same<typename T::value_type, double>::value>(
1025 _("complex") + _<sizeof(typename T::value_type)*16>(), _("longcomplex")
// npy_format_descriptor for all arithmetic and complex scalar types: a
// lookup table maps is_fmt_numeric<T>::index to the NumPy type number.
1029 template <typename T>
1030 struct npy_format_descriptor<T, enable_if_t<satisfies_any_of<T, std::is_arithmetic, is_complex>::value>>
1031 : npy_format_descriptor_name<T> {
1033 // NB: the order here must match the one in common.h
1034 constexpr static const int values[15] = {
1036 npy_api::NPY_BYTE_, npy_api::NPY_UBYTE_, npy_api::NPY_INT16_, npy_api::NPY_UINT16_,
1037 npy_api::NPY_INT32_, npy_api::NPY_UINT32_, npy_api::NPY_INT64_, npy_api::NPY_UINT64_,
1038 npy_api::NPY_FLOAT_, npy_api::NPY_DOUBLE_, npy_api::NPY_LONGDOUBLE_,
1039 npy_api::NPY_CFLOAT_, npy_api::NPY_CDOUBLE_, npy_api::NPY_CLONGDOUBLE_
1043 static constexpr int value = values[detail::is_fmt_numeric<T>::index];
1045 static pybind11::dtype dtype() {
// PyArray_DescrFromType returns a new descriptor reference; steal it
// rather than adding another reference.
1046 if (auto ptr = npy_api::get().PyArray_DescrFromType_(value))
1047 return reinterpret_steal<pybind11::dtype>(ptr);
1048 pybind11_fail("Unsupported buffer format!");
// Fixed-size character arrays map to NumPy's "S<N>" (bytes) dtype. The macro
// avoids duplicating the shared body of the two specializations below, and is
// #undef'd immediately so it cannot leak to user code.
1052 #define PYBIND11_DECL_CHAR_FMT \
1053 static constexpr auto name = _("S") + _<N>(); \
1054 static pybind11::dtype dtype() { return pybind11::dtype(std::string("S") + std::to_string(N)); }
1055 template <size_t N> struct npy_format_descriptor<char[N]> { PYBIND11_DECL_CHAR_FMT };
1056 template <size_t N> struct npy_format_descriptor<std::array<char, N>> { PYBIND11_DECL_CHAR_FMT };
1057 #undef PYBIND11_DECL_CHAR_FMT
// Built-in C arrays: the dtype is a NumPy "subarray" constructed from the
// element dtype plus an extents tuple, i.e. dtype((base, shape)).
1059 template<typename T> struct npy_format_descriptor<T, enable_if_t<array_info<T>::is_array>> {
1061 using base_descr = npy_format_descriptor<typename array_info<T>::type>;
1063 static_assert(!array_info<T>::is_empty, "Zero-sized arrays are not supported");
1065 static constexpr auto name = _("(") + array_info<T>::extents + _(")") + base_descr::name;
1066 static pybind11::dtype dtype() {
1068 array_info<T>::append_extents(shape);
1069 return pybind11::dtype::from_args(pybind11::make_tuple(base_descr::dtype(), shape));
// Enums delegate entirely to their underlying integral type's descriptor.
1073 template<typename T> struct npy_format_descriptor<T, enable_if_t<std::is_enum<T>::value>> {
1075 using base_descr = npy_format_descriptor<typename std::underlying_type<T>::type>;
1077 static constexpr auto name = base_descr::name;
1078 static pybind11::dtype dtype() { return base_descr::dtype(); }
// Describes one field of a user-registered structured (record) dtype; the
// member declarations (name, offset, size, format, descr) follow.
1081 struct field_descriptor {
// Registers a C++ POD struct as a NumPy structured dtype, records its buffer
// format string, and installs a direct converter for NumPy void scalars.
// Fails if the type is already registered or a field has no valid dtype.
1089 inline PYBIND11_NOINLINE void register_structured_dtype(
1090 any_container<field_descriptor> fields,
1091 const std::type_info& tinfo, ssize_t itemsize,
1092 bool (*direct_converter)(PyObject *, void *&)) {
1094 auto& numpy_internals = get_numpy_internals();
1095 if (numpy_internals.get_type_info(tinfo, false))
1096 pybind11_fail("NumPy: dtype is already registered");
1098 // Use ordered fields because order matters as of NumPy 1.14:
1099 // https://docs.scipy.org/doc/numpy/release.html#multiple-field-indexing-assignment-of-structured-arrays
1100 std::vector<field_descriptor> ordered_fields(std::move(fields));
1101 std::sort(ordered_fields.begin(), ordered_fields.end(),
1102 [](const field_descriptor &a, const field_descriptor &b) { return a.offset < b.offset; });
// Build the parallel (names, formats, offsets) lists that the structured
// dtype constructor consumes.
1104 list names, formats, offsets;
1105 for (auto& field : ordered_fields) {
1107 pybind11_fail(std::string("NumPy: unsupported field dtype: `") +
1108 field.name + "` @ " + tinfo.name());
1109 names.append(PYBIND11_STR_TYPE(field.name));
1110 formats.append(field.descr);
1111 offsets.append(pybind11::int_(field.offset));
1113 auto dtype_ptr = pybind11::dtype(names, formats, offsets, itemsize).release().ptr();
1115 // There is an existing bug in NumPy (as of v1.11): trailing bytes are
1116 // not encoded explicitly into the format string. This will supposedly
1117 // get fixed in v1.12; for further details, see these:
1118 // - https://github.com/numpy/numpy/issues/7797
1119 // - https://github.com/numpy/numpy/pull/7798
1120 // Because of this, we won't use numpy's logic to generate buffer format
1121 // strings and will just do it ourselves.
1123 std::ostringstream oss;
1124 // mark the structure as unaligned with '^', because numpy and C++ don't
1125 // always agree about alignment (particularly for complex), and we're
1126 // explicitly listing all our padding. This depends on none of the fields
1127 // overriding the endianness. Putting the ^ in front of individual fields
1128 // isn't guaranteed to work due to https://github.com/numpy/numpy/issues/9049
// `offset` tracks the end of the previous field; inter-field gaps are
// emitted as explicit "<n>x" padding bytes.
1130 for (auto& field : ordered_fields) {
1131 if (field.offset > offset)
1132 oss << (field.offset - offset) << 'x';
1133 oss << field.format << ':' << field.name << ':';
1134 offset = field.offset + field.size;
// Trailing padding up to the full struct size must be explicit as well.
1136 if (itemsize > offset)
1137 oss << (itemsize - offset) << 'x';
1139 auto format_str = oss.str();
1141 // Sanity check: verify that NumPy properly parses our buffer format string
1142 auto& api = npy_api::get();
1143 auto arr = array(buffer_info(nullptr, itemsize, format_str, 1));
1144 if (!api.PyArray_EquivTypes_(dtype_ptr, arr.dtype().ptr()))
1145 pybind11_fail("NumPy: invalid buffer descriptor!");
// Record the dtype and format for this C++ type, and register the direct
// converter used when loading NumPy void scalars of this dtype.
1147 auto tindex = std::type_index(tinfo);
1148 numpy_internals.registered_dtypes[tindex] = { dtype_ptr, format_str };
1149 get_internals().direct_conversions[tindex].push_back(direct_converter);
// Fallback npy_format_descriptor: POD struct types that must be registered
// via PYBIND11_NUMPY_DTYPE(...). The static_assert turns accidental use of a
// non-POD / unregistered type into a readable compile error.
1152 template <typename T, typename SFINAE> struct npy_format_descriptor {
1153 static_assert(is_pod_struct<T>::value, "Attempt to use a non-POD or unimplemented POD type as a numpy dtype");
1155 static constexpr auto name = make_caster<T>::name;
1157 static pybind11::dtype dtype() {
1158 return reinterpret_borrow<pybind11::dtype>(dtype_ptr());
// Buffer format string looked up once from the registered type info;
// get_type_info(..., true) fails if the dtype was never registered.
1161 static std::string format() {
1162 static auto format_str = get_numpy_internals().get_type_info<T>(true)->format_str;
1166 static void register_dtype(any_container<field_descriptor> fields) {
1167 register_structured_dtype(std::move(fields), typeid(typename std::remove_cv<T>::type),
1168 sizeof(T), &direct_converter);
// Cached pointer to the registered dtype (resolved once per type).
1172 static PyObject* dtype_ptr() {
1173 static PyObject* ptr = get_numpy_internals().get_type_info<T>(true)->dtype_ptr;
// Direct conversion from a NumPy void scalar: succeeds only when the
// scalar's descriptor is equivalent to the registered dtype, in which case
// `value` is pointed at the scalar's raw payload (obval).
1177 static bool direct_converter(PyObject *obj, void*& value) {
1178 auto& api = npy_api::get();
1179 if (!PyObject_TypeCheck(obj, api.PyVoidArrType_Type_))
1181 if (auto descr = reinterpret_steal<object>(api.PyArray_DescrFromScalar_(obj))) {
1182 if (api.PyArray_EquivTypes_(dtype_ptr(), descr.ptr())) {
1183 value = ((PyVoidScalarObject_Proxy *) obj)->obval;
1191 #ifdef __CLION_IDE__ // replace heavy macro with dummy code for the IDE (doesn't affect code)
1192 # define PYBIND11_NUMPY_DTYPE(Type, ...) ((void)0)
1193 # define PYBIND11_NUMPY_DTYPE_EX(Type, ...) ((void)0)
// Builds a field_descriptor for member `Field` of struct `T`, exposed to
// NumPy under the explicit name `Name`.
1196 #define PYBIND11_FIELD_DESCRIPTOR_EX(T, Field, Name) \
1197 ::pybind11::detail::field_descriptor { \
1198 Name, offsetof(T, Field), sizeof(decltype(std::declval<T>().Field)), \
1199 ::pybind11::format_descriptor<decltype(std::declval<T>().Field)>::format(), \
1200 ::pybind11::detail::npy_format_descriptor<decltype(std::declval<T>().Field)>::dtype() \
1203 // Extract name, offset and format descriptor for a struct field
1204 #define PYBIND11_FIELD_DESCRIPTOR(T, Field) PYBIND11_FIELD_DESCRIPTOR_EX(T, Field, #Field)
1206 // The main idea of this macro is borrowed from https://github.com/swansontec/map-macro
1207 // (C) William Swanson, Paul Fultz
// PYBIND11_EVAL forces the preprocessor to rescan its argument repeatedly
// (each level triples the rescans), which is what allows the mutually
// recursive MAP macros below to iterate over a variadic argument list.
1208 #define PYBIND11_EVAL0(...) __VA_ARGS__
1209 #define PYBIND11_EVAL1(...) PYBIND11_EVAL0 (PYBIND11_EVAL0 (PYBIND11_EVAL0 (__VA_ARGS__)))
1210 #define PYBIND11_EVAL2(...) PYBIND11_EVAL1 (PYBIND11_EVAL1 (PYBIND11_EVAL1 (__VA_ARGS__)))
1211 #define PYBIND11_EVAL3(...) PYBIND11_EVAL2 (PYBIND11_EVAL2 (PYBIND11_EVAL2 (__VA_ARGS__)))
1212 #define PYBIND11_EVAL4(...) PYBIND11_EVAL3 (PYBIND11_EVAL3 (PYBIND11_EVAL3 (__VA_ARGS__)))
1213 #define PYBIND11_EVAL(...) PYBIND11_EVAL4 (PYBIND11_EVAL4 (PYBIND11_EVAL4 (__VA_ARGS__)))
// End-of-list detection: when the `()` sentinel argument is reached,
// PYBIND11_MAP_GET_END expands and MAP_NEXT selects the terminating macro
// instead of the continuation.
1214 #define PYBIND11_MAP_END(...)
1215 #define PYBIND11_MAP_OUT
1216 #define PYBIND11_MAP_COMMA ,
1217 #define PYBIND11_MAP_GET_END() 0, PYBIND11_MAP_END
1218 #define PYBIND11_MAP_NEXT0(test, next, ...) next PYBIND11_MAP_OUT
1219 #define PYBIND11_MAP_NEXT1(test, next) PYBIND11_MAP_NEXT0 (test, next, 0)
1220 #define PYBIND11_MAP_NEXT(test, next) PYBIND11_MAP_NEXT1 (PYBIND11_MAP_GET_END test, next)
1221 #ifdef _MSC_VER // MSVC is not as eager to expand macros, hence this workaround
1222 #define PYBIND11_MAP_LIST_NEXT1(test, next) \
1223 PYBIND11_EVAL0 (PYBIND11_MAP_NEXT0 (test, PYBIND11_MAP_COMMA next, 0))
1225 #define PYBIND11_MAP_LIST_NEXT1(test, next) \
1226 PYBIND11_MAP_NEXT0 (test, PYBIND11_MAP_COMMA next, 0)
1228 #define PYBIND11_MAP_LIST_NEXT(test, next) \
1229 PYBIND11_MAP_LIST_NEXT1 (PYBIND11_MAP_GET_END test, next)
// LIST0/LIST1 alternate, emitting `f(t, x)` for each element separated by
// commas until the `()` sentinel ends the expansion.
1230 #define PYBIND11_MAP_LIST0(f, t, x, peek, ...) \
1231 f(t, x) PYBIND11_MAP_LIST_NEXT (peek, PYBIND11_MAP_LIST1) (f, t, peek, __VA_ARGS__)
1232 #define PYBIND11_MAP_LIST1(f, t, x, peek, ...) \
1233 f(t, x) PYBIND11_MAP_LIST_NEXT (peek, PYBIND11_MAP_LIST0) (f, t, peek, __VA_ARGS__)
1234 // PYBIND11_MAP_LIST(f, t, a1, a2, ...) expands to f(t, a1), f(t, a2), ...
1235 #define PYBIND11_MAP_LIST(f, t, ...) \
1236 PYBIND11_EVAL (PYBIND11_MAP_LIST1 (f, t, __VA_ARGS__, (), 0))
// Public entry point: registers `Type` as a structured dtype by building one
// field_descriptor per listed member and handing the vector to
// npy_format_descriptor<Type>::register_dtype.
1238 #define PYBIND11_NUMPY_DTYPE(Type, ...) \
1239 ::pybind11::detail::npy_format_descriptor<Type>::register_dtype \
1240 (::std::vector<::pybind11::detail::field_descriptor> \
1241 {PYBIND11_MAP_LIST (PYBIND11_FIELD_DESCRIPTOR, Type, __VA_ARGS__)})
// Pairwise variant of the MAP machinery: consumes arguments two at a time
// (member, name) so fields can be registered under custom names.
1244 #define PYBIND11_MAP2_LIST_NEXT1(test, next) \
1245 PYBIND11_EVAL0 (PYBIND11_MAP_NEXT0 (test, PYBIND11_MAP_COMMA next, 0))
1247 #define PYBIND11_MAP2_LIST_NEXT1(test, next) \
1248 PYBIND11_MAP_NEXT0 (test, PYBIND11_MAP_COMMA next, 0)
1250 #define PYBIND11_MAP2_LIST_NEXT(test, next) \
1251 PYBIND11_MAP2_LIST_NEXT1 (PYBIND11_MAP_GET_END test, next)
1252 #define PYBIND11_MAP2_LIST0(f, t, x1, x2, peek, ...) \
1253 f(t, x1, x2) PYBIND11_MAP2_LIST_NEXT (peek, PYBIND11_MAP2_LIST1) (f, t, peek, __VA_ARGS__)
1254 #define PYBIND11_MAP2_LIST1(f, t, x1, x2, peek, ...) \
1255 f(t, x1, x2) PYBIND11_MAP2_LIST_NEXT (peek, PYBIND11_MAP2_LIST0) (f, t, peek, __VA_ARGS__)
1256 // PYBIND11_MAP2_LIST(f, t, a1, a2, ...) expands to f(t, a1, a2), f(t, a3, a4), ...
1257 #define PYBIND11_MAP2_LIST(f, t, ...) \
1258 PYBIND11_EVAL (PYBIND11_MAP2_LIST1 (f, t, __VA_ARGS__, (), 0))
// Like PYBIND11_NUMPY_DTYPE, but each field is given as a (member, "name")
// pair so the NumPy field name can differ from the C++ member name.
1260 #define PYBIND11_NUMPY_DTYPE_EX(Type, ...) \
1261 ::pybind11::detail::npy_format_descriptor<Type>::register_dtype \
1262 (::std::vector<::pybind11::detail::field_descriptor> \
1263 {PYBIND11_MAP2_LIST (PYBIND11_FIELD_DESCRIPTOR_EX, Type, __VA_ARGS__)})
1265 #endif // __CLION_IDE__
// Iteration over a contiguous typed buffer: a plain pointer suffices.
1268 using array_iterator = typename std::add_pointer<T>::type;
1271 array_iterator<T> array_begin(const buffer_info& buffer) {
1272 return array_iterator<T>(reinterpret_cast<T*>(buffer.ptr));
// End iterator: start pointer advanced by the total element count.
1276 array_iterator<T> array_end(const buffer_info& buffer) {
1277 return array_iterator<T>(reinterpret_cast<T*>(buffer.ptr) + buffer.size);
// Odometer-style cursor over one broadcast input buffer: increment(dim) is
// called whenever dimension `dim` of the shared multi-index ticks over, and
// applies that dimension's precomputed net pointer adjustment.
1280 class common_iterator {
1282 using container_type = std::vector<ssize_t>;
1283 using value_type = container_type::value_type;
1284 using size_type = container_type::size_type;
1286 common_iterator() : p_ptr(0), m_strides() {}
1288 common_iterator(void* ptr, const container_type& strides, const container_type& shape)
1289 : p_ptr(reinterpret_cast<char*>(ptr)), m_strides(strides.size()) {
// Convert absolute per-dimension strides into carry adjustments: stepping
// dimension j forward also rewinds the full extent just traversed in the
// faster-varying dimension i = j + 1.
1290 m_strides.back() = static_cast<value_type>(strides.back());
1291 for (size_type i = m_strides.size() - 1; i != 0; --i) {
1292 size_type j = i - 1;
1293 value_type s = static_cast<value_type>(shape[i]);
1294 m_strides[j] = strides[j] + m_strides[i] - strides[i] * s;
1298 void increment(size_type dim) {
1299 p_ptr += m_strides[dim];
1302 void* data() const {
// Adjusted per-dimension pointer increments, in bytes.
1308 container_type m_strides;
// Iterates the broadcast output space while keeping one common_iterator per
// input buffer in lock-step; data<K>() yields input K's element pointer at
// the current multi-index.
1311 template <size_t N> class multi_array_iterator {
1313 using container_type = std::vector<ssize_t>;
1315 multi_array_iterator(const std::array<buffer_info, N> &buffers,
1316 const container_type &shape)
1317 : m_shape(shape.size()), m_index(shape.size(), 0),
1318 m_common_iterator() {
1320 // Manual copy to avoid conversion warning if using std::copy
1321 for (size_t i = 0; i < shape.size(); ++i)
1322 m_shape[i] = shape[i];
// `strides` is a scratch buffer reused per input by init_common_iterator.
1324 container_type strides(shape.size());
1325 for (size_t i = 0; i < N; ++i)
1326 init_common_iterator(buffers[i], shape, m_common_iterator[i], strides);
// Odometer increment: bump the fastest-varying index, carrying into the
// next-slower dimension whenever one reaches its extent.
1329 multi_array_iterator& operator++() {
1330 for (size_t j = m_index.size(); j != 0; --j) {
1332 if (++m_index[i] != m_shape[i]) {
1333 increment_common_iterator(i);
1342 template <size_t K, class T = void> T* data() const {
1343 return reinterpret_cast<T*>(m_common_iterator[K].data());
1348 using common_iter = common_iterator;
// Right-align this buffer's shape against the broadcast shape; broadcast
// dimensions (input extent 1 vs. larger output) get a stride of 0 so the
// single element is reused across that dimension.
1350 void init_common_iterator(const buffer_info &buffer,
1351 const container_type &shape,
1352 common_iter &iterator,
1353 container_type &strides) {
1354 auto buffer_shape_iter = buffer.shape.rbegin();
1355 auto buffer_strides_iter = buffer.strides.rbegin();
1356 auto shape_iter = shape.rbegin();
1357 auto strides_iter = strides.rbegin();
1359 while (buffer_shape_iter != buffer.shape.rend()) {
1360 if (*shape_iter == *buffer_shape_iter)
1361 *strides_iter = *buffer_strides_iter;
1365 ++buffer_shape_iter;
1366 ++buffer_strides_iter;
// Leading output dimensions missing from this buffer also get zero strides.
1371 std::fill(strides_iter, strides.rend(), 0);
1372 iterator = common_iter(buffer.ptr, strides, shape);
1375 void increment_common_iterator(size_t dim) {
1376 for (auto &iter : m_common_iterator)
1377 iter.increment(dim);
1380 container_type m_shape;
1381 container_type m_index;
1382 std::array<common_iter, N> m_common_iterator;
// How a set of broadcast buffers may be iterated: c_trivial / f_trivial mean
// every input is either a singleton or a full-size C- / Fortran-contiguous
// buffer (so a flat linear walk suffices); non_trivial requires the generic
// multi_array_iterator above.
1385 enum class broadcast_trivial { non_trivial, c_trivial, f_trivial };
1387 // Populates the shape and number of dimensions for the set of buffers. Returns a broadcast_trivial
1388 // enum value indicating whether the broadcast is "trivial"--that is, has each buffer being either a
1389 // singleton or a full-size, C-contiguous (`c_trivial`) or Fortran-contiguous (`f_trivial`) storage
1390 // buffer; returns `non_trivial` otherwise.
1392 broadcast_trivial broadcast(const std::array<buffer_info, N> &buffers, ssize_t &ndim, std::vector<ssize_t> &shape) {
// The output rank is the maximum rank across all inputs.
1393 ndim = std::accumulate(buffers.begin(), buffers.end(), ssize_t(0), [](ssize_t res, const buffer_info &buf) {
1394 return std::max(res, buf.ndim);
1398 shape.resize((size_t) ndim, 1);
1400 // Figure out the output size, and make sure all input arrays conform (i.e. are either size 1 or
// Shapes are right-aligned (NumPy broadcasting), hence reverse iteration.
1402 for (size_t i = 0; i < N; ++i) {
1403 auto res_iter = shape.rbegin();
1404 auto end = buffers[i].shape.rend();
1405 for (auto shape_iter = buffers[i].shape.rbegin(); shape_iter != end; ++shape_iter, ++res_iter) {
1406 const auto &dim_size_in = *shape_iter;
1407 auto &dim_size_out = *res_iter;
1409 // Each input dimension can either be 1 or `n`, but `n` values must match across buffers
1410 if (dim_size_out == 1)
1411 dim_size_out = dim_size_in;
1412 else if (dim_size_in != 1 && dim_size_in != dim_size_out)
1413 pybind11_fail("pybind11::vectorize: incompatible size/dimension of inputs!");
1417 bool trivial_broadcast_c = true;
1418 bool trivial_broadcast_f = true;
1419 for (size_t i = 0; i < N && (trivial_broadcast_c || trivial_broadcast_f); ++i) {
// A singleton broadcasts trivially in either storage order.
1420 if (buffers[i].size == 1)
1423 // Require the same number of dimensions:
1424 if (buffers[i].ndim != ndim)
1425 return broadcast_trivial::non_trivial;
1427 // Require all dimensions be full-size:
1428 if (!std::equal(buffers[i].shape.cbegin(), buffers[i].shape.cend(), shape.cbegin()))
1429 return broadcast_trivial::non_trivial;
1431 // Check for C contiguity (but only if previous inputs were also C contiguous)
// C order: strides grow from itemsize as we multiply up the trailing dims.
1432 if (trivial_broadcast_c) {
1433 ssize_t expect_stride = buffers[i].itemsize;
1434 auto end = buffers[i].shape.crend();
1435 for (auto shape_iter = buffers[i].shape.crbegin(), stride_iter = buffers[i].strides.crbegin();
1436 trivial_broadcast_c && shape_iter != end; ++shape_iter, ++stride_iter) {
1437 if (expect_stride == *stride_iter)
1438 expect_stride *= *shape_iter;
1440 trivial_broadcast_c = false;
1444 // Check for Fortran contiguity (if previous inputs were also F contiguous)
// F order: same check, but accumulating from the leading dimensions.
1445 if (trivial_broadcast_f) {
1446 ssize_t expect_stride = buffers[i].itemsize;
1447 auto end = buffers[i].shape.cend();
1448 for (auto shape_iter = buffers[i].shape.cbegin(), stride_iter = buffers[i].strides.cbegin();
1449 trivial_broadcast_f && shape_iter != end; ++shape_iter, ++stride_iter) {
1450 if (expect_stride == *stride_iter)
1451 expect_stride *= *shape_iter;
1453 trivial_broadcast_f = false;
// If both hold (e.g. all inputs 1-D or singletons), prefer C order.
1459 trivial_broadcast_c ? broadcast_trivial::c_trivial :
1460 trivial_broadcast_f ? broadcast_trivial::f_trivial :
1461 broadcast_trivial::non_trivial;
// Per-argument traits for vectorize(): decides whether an argument is
// broadcast over arrays or passed through unchanged.
1464 template <typename T>
1465 struct vectorize_arg {
1466 static_assert(!std::is_rvalue_reference<T>::value, "Functions with rvalue reference arguments cannot be vectorized");
1467 // The wrapped function gets called with this type:
1468 using call_type = remove_reference_t<T>;
1469 // Is this a vectorized argument?
// Vectorizable: arithmetic/complex/POD value types that are not pointers,
// built-in arrays, std::arrays or enums, taken by value or const lvalue ref.
1470 static constexpr bool vectorize =
1471 satisfies_any_of<call_type, std::is_arithmetic, is_complex, std::is_pod>::value &&
1472 satisfies_none_of<call_type, std::is_pointer, std::is_array, is_std_array, std::is_enum>::value &&
1473 (!std::is_reference<T>::value ||
1474 (std::is_lvalue_reference<T>::value && std::is_const<call_type>::value));
1475 // Accept this type: an array for vectorized types, otherwise the type as-is:
1476 using type = conditional_t<vectorize, array_t<remove_cv_t<call_type>, array::forcecast>, T>;
// Wraps a callable so that its scalar arguments may be passed as NumPy
// arrays and the call is broadcast element-wise over them.
1479 template <typename Func, typename Return, typename... Args>
1480 struct vectorize_helper {
// N: total argument count; NVectorized: how many of them are broadcastable.
1482 static constexpr size_t N = sizeof...(Args);
1483 static constexpr size_t NVectorized = constexpr_sum(vectorize_arg<Args>::vectorize...);
1484 static_assert(NVectorized >= 1,
1485 "pybind11::vectorize(...) requires a function with at least one vectorizable argument");
1488 template <typename T>
1489 explicit vectorize_helper(T &&f) : f(std::forward<T>(f)) { }
// Entry point invoked from Python: forwards to run() together with the
// three index sequences described below.
1491 object operator()(typename vectorize_arg<Args>::type... args) {
1493 make_index_sequence<N>(),
1494 select_indices<vectorize_arg<Args>::vectorize...>(),
1495 make_index_sequence<NVectorized>());
1499 remove_reference_t<Func> f;
1501 // Internal compiler error in MSVC 19.16.27025.1 (Visual Studio 2017 15.9.4), when compiling with "/permissive-" flag
1502 // when arg_call_types is manually inlined.
1503 using arg_call_types = std::tuple<typename vectorize_arg<Args>::call_type...>;
1504 template <size_t Index> using param_n_t = typename std::tuple_element<Index, arg_call_types>::type;
1506 // Runs a vectorized function given arguments tuple and three index sequences:
1507 // - Index is the full set of 0 ... (N-1) argument indices;
1508 // - VIndex is the subset of argument indices with vectorized parameters, letting us access
1509 // vectorized arguments (anything not in this sequence is passed through)
1510 // - BIndex is an incremental sequence (beginning at 0) of the same size as VIndex, so that
1511 // we can store vectorized buffer_infos in an array (argument VIndex has its buffer at
1512 // index BIndex in the array).
1513 template <size_t... Index, size_t... VIndex, size_t... BIndex> object run(
1514 typename vectorize_arg<Args>::type &...args,
1515 index_sequence<Index...> i_seq, index_sequence<VIndex...> vi_seq, index_sequence<BIndex...> bi_seq) {
1517 // Pointers to values the function was called with; the vectorized ones set here will start
1518 // out as array_t<T> pointers, but they will be changed to T pointers before we
1519 // call the wrapped function. Non-vectorized pointers are left as-is.
1520 std::array<void *, N> params{{ &args... }};
1522 // The array of `buffer_info`s of vectorized arguments:
1523 std::array<buffer_info, NVectorized> buffers{{ reinterpret_cast<array *>(params[VIndex])->request()... }};
1525 /* Determine dimensions parameters of output array */
1527 std::vector<ssize_t> shape(0);
1528 auto trivial = broadcast(buffers, nd, shape);
1529 size_t ndim = (size_t) nd;
// Total number of output elements (product of the broadcast shape).
1531 size_t size = std::accumulate(shape.begin(), shape.end(), (size_t) 1, std::multiplies<size_t>());
1533 // If all arguments are 0-dimension arrays (i.e. single values) return a plain value (i.e.
1534 // not wrapped in an array).
1535 if (size == 1 && ndim == 0) {
1536 PYBIND11_EXPAND_SIDE_EFFECTS(params[VIndex] = buffers[BIndex].ptr);
1537 return cast(f(*reinterpret_cast<param_n_t<Index> *>(params[Index])...));
// Allocate the output in the same memory order as the (trivial) inputs so
// the fast path can fill it with a single linear walk.
1540 array_t<Return> result;
1541 if (trivial == broadcast_trivial::f_trivial) result = array_t<Return, array::f_style>(shape);
1542 else result = array_t<Return>(shape);
1544 if (size == 0) return std::move(result);
1546 /* Call the function */
1547 if (trivial == broadcast_trivial::non_trivial)
1548 apply_broadcast(buffers, params, result, i_seq, vi_seq, bi_seq);
1550 apply_trivial(buffers, params, result.mutable_data(), size, i_seq, vi_seq, bi_seq);
1552 return std::move(result);
// Fast path: every input is a singleton or contiguous in the same order as
// the output, so all pointers can simply be bumped in lock-step.
1555 template <size_t... Index, size_t... VIndex, size_t... BIndex>
1556 void apply_trivial(std::array<buffer_info, NVectorized> &buffers,
// NOTE(review): "¶ms" below looks like mojibake for "&params" — restore
// the original encoding.
1557 std::array<void *, N> ¶ms,
1560 index_sequence<Index...>, index_sequence<VIndex...>, index_sequence<BIndex...>) {
1562 // Initialize an array of mutable byte references and sizes with references set to the
1563 // appropriate pointer in `params`; as we iterate, we'll increment each pointer by its size
1564 // (except for singletons, which get an increment of 0).
1565 std::array<std::pair<unsigned char *&, const size_t>, NVectorized> vecparams{{
1566 std::pair<unsigned char *&, const size_t>(
1567 reinterpret_cast<unsigned char *&>(params[VIndex] = buffers[BIndex].ptr),
1568 buffers[BIndex].size == 1 ? 0 : sizeof(param_n_t<VIndex>)
// Single linear walk over the output; inputs advance by element size
// (or stay put, for singletons).
1572 for (size_t i = 0; i < size; ++i) {
1573 out[i] = f(*reinterpret_cast<param_n_t<Index> *>(params[Index])...);
1574 for (auto &x : vecparams) x.first += x.second;
// General path: inputs have mismatched shapes or strides, so each is walked
// with its own (possibly zero-stride) odometer via multi_array_iterator.
1578 template <size_t... Index, size_t... VIndex, size_t... BIndex>
1579 void apply_broadcast(std::array<buffer_info, NVectorized> &buffers,
// NOTE(review): "¶ms" below looks like mojibake for "&params" — restore
// the original encoding.
1580 std::array<void *, N> ¶ms,
1581 array_t<Return> &output_array,
1582 index_sequence<Index...>, index_sequence<VIndex...>, index_sequence<BIndex...>) {
1584 buffer_info output = output_array.request();
1585 multi_array_iterator<NVectorized> input_iter(buffers, output.shape);
// Walk the output linearly; input_iter advances every input according to
// its broadcast-adjusted strides at each step.
1587 for (array_iterator<Return> iter = array_begin<Return>(output), end = array_end<Return>(output);
1589 ++iter, ++input_iter) {
1590 PYBIND11_EXPAND_SIDE_EFFECTS((
1591 params[VIndex] = input_iter.template data<BIndex>()
1593 *iter = f(*reinterpret_cast<param_n_t<Index> *>(std::get<Index>(params))...);
// Deduces Return/Args from a plain function-pointer signature (second,
// unnamed parameter) and wraps the callable `f` in a vectorize_helper.
1598 template <typename Func, typename Return, typename... Args>
1599 vectorize_helper<Func, Return, Args...>
1600 vectorize_extractor(const Func &f, Return (*) (Args ...)) {
1601 return detail::vectorize_helper<Func, Return, Args...>(f);
// Signature name rendered in docstrings for array_t parameters, e.g.
// "numpy.ndarray[float64]".
1604 template <typename T, int Flags> struct handle_type_name<array_t<T, Flags>> {
1605 static constexpr auto name = _("numpy.ndarray[") + npy_format_descriptor<T>::name + _("]");
1608 NAMESPACE_END(detail)
1610 // Vanilla pointer vectorizer:
1611 template <typename Return, typename... Args>
1612 detail::vectorize_helper<Return (*)(Args...), Return, Args...>
1613 vectorize(Return (*f) (Args ...)) {
1614 return detail::vectorize_helper<Return (*)(Args...), Return, Args...>(f);
1617 // lambda vectorizer:
// Accepts any lambda/functor with an unambiguous call operator; its
// signature is recovered via function_signature_t and vectorize_extractor.
1618 template <typename Func, detail::enable_if_t<detail::is_lambda<Func>::value, int> = 0>
1619 auto vectorize(Func &&f) -> decltype(
1620 detail::vectorize_extractor(std::forward<Func>(f), (detail::function_signature_t<Func> *) nullptr)) {
1621 return detail::vectorize_extractor(std::forward<Func>(f), (detail::function_signature_t<Func> *) nullptr);
1624 // Vectorize a class method (non-const):
// The bound object becomes an extra leading `Class *` argument via mem_fn.
1625 template <typename Return, typename Class, typename... Args,
1626 typename Helper = detail::vectorize_helper<decltype(std::mem_fn(std::declval<Return (Class::*)(Args...)>())), Return, Class *, Args...>>
1627 Helper vectorize(Return (Class::*f)(Args...)) {
1628 return Helper(std::mem_fn(f));
1631 // Vectorize a class method (const):
// Same as above, with a `const Class *` leading argument.
1632 template <typename Return, typename Class, typename... Args,
1633 typename Helper = detail::vectorize_helper<decltype(std::mem_fn(std::declval<Return (Class::*)(Args...) const>())), Return, const Class *, Args...>>
1634 Helper vectorize(Return (Class::*f)(Args...) const) {
1635 return Helper(std::mem_fn(f));
1638 NAMESPACE_END(PYBIND11_NAMESPACE)
1640 #if defined(_MSC_VER)
1641 #pragma warning(pop)