[STYLE] check python with flake8 (#424)

I've been using this locally to find errors without running tests, and now that we're using autopep8, it passes with minimal suppressions. This is also what turned up the issues with the tutorials, which were fixed in #422.
Author: Madeleine Thompson
Date: 2022-01-07 15:28:36 -08:00
Committed by: GitHub
Parent: a70acfec77
Commit: efdabe6073
17 changed files with 18 additions and 24 deletions

View File

@@ -36,6 +36,9 @@ jobs:
 - name: Check style
   run: "autopep8 -a -r -d --exit-code ./python || ( echo '::error title=Style issues::Please run \"autopep8 -a -r -i ./python\"' ; exit 1 )"
+- name: Flake8
+  run: "flake8 --config ./python/setup.cfg ./python || ( echo '::error::Flake8 failed; see logs for errors.' ; exit 1 )"
 - name: Unit tests
   run: |
     cd python/test/unit

View File

@@ -50,7 +50,6 @@ def bench_op(M, N, K, AT, BT, dtype, provider, warmup=25, rep=75):
         a = a.t()
     if BT:
         b = b.t()
-    num_flops = 2 * M * N * K
     tflops = lambda ms: 2. * M * N * K / ms * 1e-9
     if provider == "cublas":
         ms, min_ms, max_ms = triton.testing.do_bench(lambda: torch.matmul(a, b), warmup=warmup, rep=rep)

View File

@@ -3,3 +3,6 @@ description_file = README.md
 [pycodestyle]
 ignore = E501,E701,E731
+[flake8]
+ignore = E501,E701,E731
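
For reference, the new [flake8] section suppresses the same three pycodestyle checks that autopep8 was already configured to skip: E501 (line too long), E701 (multiple statements on one line after a colon), and E731 (a lambda assigned to a name instead of a def). A hypothetical snippet, not from this repository, that would trip all three:

    values = [1, 2, 3]
    grand_total = sum(values) + sum(v + 1 for v in values) + sum(v + 2 for v in values) + max(values) + min(values)  # E501: line longer than 79 characters
    if grand_total: print(grand_total)  # E701: statement on the same line as the colon
    square = lambda v: v * v  # E731: lambda assigned to a name rather than defined with def
    print(square(4))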

View File

@@ -149,6 +149,7 @@ setup(
     extras_require={
         "tests": [
             "autopep8",
+            "flake8",
             "isort",
             "numpy",
             "pytest",

View File

@@ -3,7 +3,6 @@ import sys
 import pytest
 import torch
-from numpy import record
 import triton
 import triton.language as tl

View File

@@ -1,3 +1,4 @@
+# flake8: noqa: F821,F841
 import copy
 import itertools
 import re
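
The file-wide suppression covers pyflakes' F821 (undefined name) and F841 (local variable assigned but never used), which this test module presumably trips through its templated kernels rather than through real bugs. A minimal, hypothetical illustration of what each code flags:

    def check():
        unused = 42               # F841: assigned but never read
        return undefined_name     # F821: flake8 cannot find this name anywhere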

View File

@@ -2,7 +2,6 @@ import numpy as np
 import pytest
 import scipy.stats
 import torch
-from numpy.random import Philox
 import triton
 import triton.language as tl

View File

@@ -1,4 +1,5 @@
"""isort:skip_file""" """isort:skip_file"""
# flake8: noqa: F401
__version__ = '2.0.0' __version__ = '2.0.0'
# TODO: torch needs to be imported first # TODO: torch needs to be imported first
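
F401 flags imports that are never used within the importing file. This `__init__.py` (and the `triton.language`, `triton.ops`, and `triton.ops.blocksparse` ones below) imports submodules and names purely to re-export them, so the check is silenced for the whole file instead. A hypothetical, self-contained sketch of the same pattern:

    # __init__.py of some package -- names are imported only to be re-exported.
    # flake8: noqa: F401
    from os.path import join, split  # without the noqa, both would be flagged as F401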

View File

@@ -1,19 +1,17 @@
 import ast
 import builtins
-import dbm
 import functools
 import hashlib
 import inspect
 import os
 import pickle
-import struct
 import subprocess
 import sys
 import tempfile
 import textwrap
 import time
 import warnings
-from typing import Dict, Optional
+from typing import Dict
 import torch
 from filelock import FileLock
@@ -406,7 +404,7 @@ class CodeGenerator(ast.NodeVisitor):
             self.visit(pos_cond_node),
             self.visit(neg_cond_node),
             _builder=self.builder)
-        #cond_node = neg_cond_node
+        # cond_node = neg_cond_node
         step_node = ast.AugAssign(target=st_target, op=ast.Add(), value=arg_2)
         # code generation
         current_bb = self.builder.get_insert_block()

View File

@@ -1,3 +1,4 @@
+# flake8: noqa: F401
 from . import core, random
 from .core import *
 from .random import *

View File

@@ -802,14 +802,6 @@ def max_contiguous(input, value, _builder=None):
     return frontend.max_contiguous(input, value, _builder)
-@builtin
-def max_contiguous(input, value, _builder=None):
-    """
-    Let the compiler knows that the `value` first values in :code:`input` are contiguous.
-    """
-    return frontend.max_contiguous(input, value, _builder)
 # -----------------------
 # Standard library
 # -----------------------
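
The duplicated block removed above is the kind of problem the commit message alludes to finding without running tests: flake8 reports a second, identical definition as F811 (redefinition of an unused name). A hypothetical reproduction:

    def max_contiguous(x):
        return x

    def max_contiguous(x):  # F811: redefinition of unused 'max_contiguous'
        return x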

View File

@@ -1,3 +1,4 @@
+# flake8: noqa: F401
 #from .conv import _conv, conv
 from . import blocksparse
 from .cross_entropy import _cross_entropy, cross_entropy

View File

@@ -1,2 +1,3 @@
+# flake8: noqa: F401
 from .matmul import matmul
 from .softmax import softmax

View File

@@ -1,5 +1,3 @@
-import os
 import torch
 import triton
@@ -96,11 +94,9 @@ class _cross_entropy(torch.autograd.Function):
""" """
# load saved tensors # load saved tensors
neg_logprobs, indices = ctx.saved_tensors neg_logprobs, indices = ctx.saved_tensors
# make kernel
device, dtype = neg_logprobs.device, neg_logprobs.dtype
n_cols = neg_logprobs.shape[-1]
# run the kernel # run the kernel
# neg_logprobs will be modified in place to become our gradient: # neg_logprobs will be modified in place to become our gradient:
n_cols = neg_logprobs.shape[-1]
grid = lambda opt: (neg_logprobs.numel() // n_cols, ) grid = lambda opt: (neg_logprobs.numel() // n_cols, )
_backward[grid](neg_logprobs, indices, dneg_logprobs, n_cols) _backward[grid](neg_logprobs, indices, dneg_logprobs, n_cols)
return neg_logprobs, None return neg_logprobs, None

View File

@@ -2,7 +2,7 @@ import torch
 import triton
 import triton.language as tl
-from .matmul_perf_model import *
+from .matmul_perf_model import estimate_matmul_time, prune_num_stages
 def init_to_zero(name):
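
Swapping the wildcard import for the two names the file actually uses also keeps flake8 quiet: star imports are reported as F403, and every name that might originate from one as F405. A hypothetical, self-contained version of the same trade-off:

    # "from math import *" would draw F403, and using "sqrt" afterwards would draw F405,
    # because flake8 cannot prove where the name comes from.
    from math import sqrt  # explicit import: nothing for flake8 to flag
    print(sqrt(2.0))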

View File

@@ -20,7 +20,6 @@
 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 # SOFTWARE.
-import argparse
 import re
 import subprocess
@@ -75,7 +74,7 @@ def extract(file_path, fun):
     # .headerflags: ...
     # /*0000*/ asmstr /*0x...*/
     #          /*0x...*/
-    fname_match = FNAME_RE.match(line)
     # Looking for new function header (function: <name>)
     while FNAME_RE.match(line) is None:
         line_idx += 1

View File

@@ -65,7 +65,7 @@ def add(x: torch.Tensor, y: torch.Tensor):
     # - each torch.tensor object is implicitly converted into a pointer to its first element.
     # - `triton.jit`'ed functions can be index with a launch grid to obtain a callable GPU kernel
     # - don't forget to pass meta-parameters as keywords arguments
-    pgm = add_kernel[grid](x, y, output, n_elements, BLOCK_SIZE=1024)
+    add_kernel[grid](x, y, output, n_elements, BLOCK_SIZE=1024)
     # We return a handle to z but, since `torch.cuda.synchronize()` hasn't been called, the kernel is still
     # running asynchronously at this point.
     return output