Examples¶
All examples are located in the GitHub repository. Each example's name starts with a 2-digit number followed by a descriptive name. You can run the examples in any order; however, if you are new to the Data Parallel Extensions for Python, it is recommended to go through the examples in the order in which they are enumerated.
The following command runs the very first example of using the Data Parallel Extensions for Python:
python ./examples/01-hello_dpnp.py
The following are the listings of these examples:
# Example 01: allocate an array with dpnp and reduce it on the device.
import dpnp as np

# The array is placed on the default SYCL device chosen by the runtime.
x = np.asarray([1, 2, 3])
print("Array x allocated on the device:", x.device)

# The reduction is executed on the same device that holds x.
y = np.sum(x)
print("Result y is located on the device:", y.device)  # The same device as x
print("Shape of y is:", y.shape)  # 0-dimensional array
print("y=", y)  # Expect 6
# Example 02: try to allocate on a GPU device, falling back to the default device.
# Fix: the try/except bodies lost their indentation in the listing, making it
# invalid Python; the block structure is restored here.
import dpnp as np

# Placeholder allocation on the default device, used if no GPU is present.
x = np.empty(3)
try:
    # Requesting device="gpu" raises if the SYCL runtime exposes no GPU.
    x = np.asarray([1, 2, 3], device="gpu")
except Exception:
    print("GPU device is not available")

print("Array x allocated on the device:", x.device)
y = np.sum(x)
print("Result y is located on the device:", y.device)  # The same device as x
print("Shape of y is:", y.shape)  # 0-dimensional array
print("y=", y)  # Expect 6
# Example 03: JIT-compile the reduction with numba_dpex and run it on the
# device where the input array resides.
# Fix: the try/except bodies lost their indentation in the listing, making it
# invalid Python; the block structure is restored here.
import dpnp as np
from numba_dpex import dpjit as njit


@njit()
def sum_it(x):  # Device queue is inferred from x. The kernel is submitted to that queue
    return np.sum(x)


# Placeholder allocation on the default device, used if no GPU is present.
x = np.empty(3)
try:
    # Requesting device="gpu" raises if the SYCL runtime exposes no GPU.
    x = np.asarray([1, 2, 3], device="gpu")
except Exception:
    print("GPU device is not available")

print("Array x allocated on the device:", x.device)
y = sum_it(x)
print("Result y is located on the device:", y.device)  # The same device as x
print("Shape of y is:", y.shape)  # 0-dimensional array
print("y=", y)  # Expect 6
# Example 04: inspect the available SYCL platforms and devices with dpctl.
import dpctl

dpctl.lsplatform()  # Print platform information

# Get the list of all GPU devices
print("GPU devices:", dpctl.get_devices(device_type="gpu"))
# Get the number of GPU devices
print("Number of GPU devices", dpctl.get_num_devices(device_type="gpu"))
# Check if there are CPU devices
print("Has CPU devices?", dpctl.has_cpu_devices())