[FRONTEND] add python e2e launch empty kernel test (#68)
@@ -2,18 +2,21 @@ import torch
import triton
import triton.language as tl
import triton.runtime as runtime

# trigger the torch.device implicitly to ensure cuda context initialization
torch.zeros([10], device=torch.device('cuda'))


@triton.jit
def empty_kernel(X, stride_xm, BLOCK: tl.constexpr):
    pass


def test_empty_kernel_cubin_compile():
    @triton.jit
    def kernel(X, stride_xm, stride_xn, BLOCK: tl.constexpr):
        pass

    device = torch.cuda.current_device()
-    cubin = triton.compile(kernel,
+    cubin = triton.compile(empty_kernel,
                           "*fp32,i32,i32",
                           device=device,
                           constants={"BLOCK": 256},
@@ -21,3 +24,25 @@ def test_empty_kernel_cubin_compile():
    print('cubin size:', len(cubin))
    assert len(cubin) > 0


def test_empty_kernel_launch():
    device = torch.cuda.current_device()
    binary = runtime.build_kernel(empty_kernel, "*fp32,i32,i32",
                                  device=device,
                                  constants={"BLOCK": 256},
                                  num_warps=4,
                                  num_stages=3)
    grid = lambda META: (
        triton.cdiv(1024, META['BLOCK']) * triton.cdiv(1024, META['BLOCK']),
    )

    A = torch.zeros([1024], device="cuda")
    runtime.launch_kernel(fn=empty_kernel,
                          binary=binary,
                          grid=grid,
                          num_warps=4,
                          num_stages=3,
                          X=A,
                          stride_xm=256,
                          BLOCK=tl.constexpr(256))
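A possible follow-up check, not part of this diff: because empty_kernel stores nothing, the launch in test_empty_kernel_launch can be sanity-checked by synchronizing and asserting that A is still all zeros. A minimal sketch, assuming it is placed right after the launch_kernel call above:

    # sketch only, not in the commit: verify the no-op launch completed cleanly
    torch.cuda.synchronize()   # wait for the launched kernel to finish
    assert (A == 0).all()      # empty_kernel writes nothing, so A stays zero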
python/tests/test_vecadd_no_scf.py (new file, 27 lines)
@@ -0,0 +1,27 @@
import triton
import triton.language as tl

NUM_WARPS = 4

# triton kernel


def test_vecadd_no_scf():
    @triton.jit
    def kernel(x_ptr, stride_xn,
               y_ptr, stride_yn,
               z_ptr, stride_zn,
               BLOCK_SIZE_N: tl.constexpr):
        pid = tl.program_id(axis=0)
        offset = pid * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
        x_ptrs = x_ptr + offset
        y_ptrs = y_ptr + offset
        x = tl.load(x_ptrs)
        y = tl.load(y_ptrs)
        z = x + y
        z_ptrs = z_ptr + offset
        tl.store(z_ptrs, z)

    ret = triton.compile(kernel, "*fp32,i32,*fp32,i32,*fp32,i32", constants={"BLOCK_SIZE_N": 256}, num_warps=NUM_WARPS, device=0, output="ptx")

    print(ret)
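test_vecadd_no_scf currently only compiles the kernel to PTX and prints it; nothing is launched or checked numerically. Below is a hedged sketch of what an end-to-end variant could look like, reusing the runtime.build_kernel / runtime.launch_kernel pattern from test_empty_kernel_launch in the file above. It assumes that import torch and import triton.runtime as runtime are added to this file, that the keyword-argument launch style (with tl.constexpr for constexpr parameters) carries over to kernels with several pointer arguments, and that the inner kernel is passed in explicitly; vecadd_launch_sketch and z_ref are illustrative names only.

    # sketch only, not in the commit: build, launch, and check against torch
    import torch
    import triton.runtime as runtime

    def vecadd_launch_sketch(kernel):
        device = torch.cuda.current_device()
        binary = runtime.build_kernel(kernel, "*fp32,i32,*fp32,i32,*fp32,i32",
                                      device=device,
                                      constants={"BLOCK_SIZE_N": 256},
                                      num_warps=NUM_WARPS,
                                      num_stages=3)
        x = torch.randn(256, device="cuda")
        y = torch.randn(256, device="cuda")
        z = torch.empty_like(x)
        # one program per BLOCK_SIZE_N elements, as in the empty-kernel test
        grid = lambda META: (triton.cdiv(256, META['BLOCK_SIZE_N']),)
        runtime.launch_kernel(fn=kernel, binary=binary, grid=grid,
                              num_warps=NUM_WARPS, num_stages=3,
                              x_ptr=x, stride_xn=1,
                              y_ptr=y, stride_yn=1,
                              z_ptr=z, stride_zn=1,
                              BLOCK_SIZE_N=tl.constexpr(256))
        z_ref = x + y                    # reference result from torch
        assert torch.allclose(z, z_ref)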