Skip to content
This repository has been archived by the owner on May 24, 2021. It is now read-only.

Commit

Permalink
removed deprecated latency observer
Browse files Browse the repository at this point in the history
  • Loading branch information
awwong1 committed Oct 26, 2019
1 parent ac301cb commit 54b554b
Show file tree
Hide file tree
Showing 9 changed files with 70 additions and 216 deletions.
5 changes: 5 additions & 0 deletions .flake8
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
[flake8]
ignore = E203, E266, E501, W503
max-line-length = 80
max-complexity = 18
select = B,C,E,F,W,T4,B9
4 changes: 3 additions & 1 deletion .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -121,7 +121,8 @@ dmypy.json
.LSOverride

# Icon must end with two \r
Icon
Icon


# Thumbnails
._*
Expand Down Expand Up @@ -182,3 +183,4 @@ $RECYCLE.BIN/
# .nfs files are created when an open file is removed but is still being accessed
.nfs*

.vscode
21 changes: 2 additions & 19 deletions requirements.txt
Original file line number Diff line number Diff line change
@@ -1,19 +1,2 @@
appdirs==1.4.3
astroid==2.2.5
attrs==19.1.0
black==19.3b0
Click==7.0
isort==4.3.21
lazy-object-proxy==1.4.1
mccabe==0.6.1
mypy==0.711
mypy-extensions==0.4.1
numpy==1.16.4
Pillow==6.1.0
pkg-resources==0.0.0
six==1.12.0
toml==0.10.0
torch==1.1.0
torchvision==0.3.0
typed-ast==1.4.0
wrapt==1.11.2
torch>=1.1.0
torchvision>=0.3.0
40 changes: 31 additions & 9 deletions setup.py
Original file line number Diff line number Diff line change
@@ -1,11 +1,31 @@
import codecs
import os
import re
import setuptools

with open("README.md", "r") as fh:
long_description = fh.read()
here = os.path.abspath(os.path.dirname(__file__))


def read(*parts):
with codecs.open(os.path.join(here, *parts), "r") as fp:
return fp.read()


def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M)
if version_match:
return version_match.group(1)

raise RuntimeError("Unable to find version string.")


long_description = read("README.md")
version = find_version("torchprof", "__init__.py")

setuptools.setup(
name="torchprof",
version="0.3.1",
version=version,
author="Alexander Wong",
author_email="alex@udia.ca",
description="Measure neural network device specific metrics (latency, flops, etc.)",
Expand All @@ -14,12 +34,14 @@
url="https://github.com/awwong1/torchprof",
packages=setuptools.find_packages(),
license="MIT",
install_requires=[
"torch>=1.1.0,<2"
],
install_requires=["torch>=1.1.0,<2"],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent"
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
)
)
7 changes: 4 additions & 3 deletions tests/test_profile.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,6 @@
import torch
import torchprof
import torchvision
import pprint


class TestProfile(unittest.TestCase):
Expand Down Expand Up @@ -137,7 +136,9 @@ def test_cpu_profile_structure(self):
def test_cuda_profile_structure(self):
model = torchvision.models.alexnet(pretrained=False).cuda()
x = torch.rand([1, 3, 224, 224]).cuda()
self._profile_structure(model, x, use_cuda=True, alexnet_ops=self.alexnet_gpu_ops)
self._profile_structure(
model, x, use_cuda=True, alexnet_ops=self.alexnet_gpu_ops
)

def _profile_structure(self, model, x, use_cuda=False, alexnet_ops=[]):
with torchprof.Profile(model, use_cuda=use_cuda) as prof:
Expand All @@ -164,4 +165,4 @@ def _profile_structure(self, model, x, use_cuda=False, alexnet_ops=[]):
self.assertIsInstance(pretty, str)
self.assertIsInstance(pretty_full, str)

# pprint.pprint(pretty)
# pprint.pprint(pretty)
1 change: 1 addition & 0 deletions tests/test_set_paths.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
import torchprof
import torchvision


class TestSetPaths(unittest.TestCase):
def test_cpu_profile_structure(self):
model = torchvision.models.alexnet(pretrained=False)
Expand Down
9 changes: 4 additions & 5 deletions torchprof/__init__.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,6 @@
# torchprof package entry point.
# NOTE(review): this span is a diff render interleaving the pre- and
# post-commit versions of torchprof/__init__.py; reconstructed here as the
# post-commit module (the deprecated LatencyObserver export was removed).
from torchprof.profile import Profile

name = "torchprof"

# Public API: only Profile remains after the latency observer removal.
__all__ = ["Profile"]
__version__ = "0.3.1"
170 changes: 0 additions & 170 deletions torchprof/latency_observer.py

This file was deleted.

29 changes: 20 additions & 9 deletions torchprof/profile.py
Original file line number Diff line number Diff line change
Expand Up @@ -52,15 +52,19 @@ def __exit__(self, exc_type, exc_val, exc_tb):

def __str__(self):
if self.exited:
return traces_to_display(self.traces, self.trace_profile_events, paths=self.paths)
return traces_to_display(
self.traces, self.trace_profile_events, paths=self.paths
)
return "<unfinished torchprof.profile>"

def __call__(self, *args, **kwargs):
return self._model(*args, **kwargs)

def _hook_trace(self, trace):
[path, leaf, module] = trace
if (self.paths is not None and path in self.paths) or (self.paths is None and leaf):
if (self.paths is not None and path in self.paths) or (
self.paths is None and leaf
):
_forward = module.forward
self._forwards[path] = _forward

Expand All @@ -79,7 +83,9 @@ def wrap_forward(*args, **kwargs):

def _remove_hook_trace(self, trace):
[path, leaf, module] = trace
if (self.paths is not None and path in self.paths) or (self.paths is None and leaf):
if (self.paths is not None and path in self.paths) or (
self.paths is None and leaf
):
module.forward = self._forwards[path]

def raw(self):
Expand All @@ -89,18 +95,21 @@ def raw(self):
def display(self, show_events=False):
if self.exited:
return traces_to_display(
self.traces, self.trace_profile_events, show_events=show_events, paths=self.paths
self.traces,
self.trace_profile_events,
show_events=show_events,
paths=self.paths,
)
return "<unfinished torchprof.profile>"


def flatten_tree(t, depth=0):
    """Flatten a nested OrderedDict trace tree into a list of rows.

    Each node in ``t`` maps a module name to its child subtree; a node's own
    measurements (if any) are stored under the key ``None`` to avoid clashing
    with child-module names.

    Args:
        t: nested dict of ``{name: subtree}``; a subtree may hold its own
            measurements under the key ``None``.
        depth: current nesting depth, used for display indentation.

    Returns:
        list of ``[depth, name, measures]`` rows in depth-first order, where
        ``measures`` is ``None`` for nodes without their own measurements.

    Note:
        ``st.pop(None, None)`` mutates the input tree by removing the
        measurement entries as it flattens.
    """
    flat = []
    for name, st in t.items():
        # Remove (and capture) this node's measurements so the recursive
        # call only sees child-module keys.
        measures = st.pop(None, None)
        flat.append([depth, name, measures])
        flat.extend(flatten_tree(st, depth=depth + 1))
    return flat


def traces_to_display(traces, trace_events, show_events=False, paths=None):
Expand All @@ -116,7 +125,9 @@ def traces_to_display(traces, trace_events, show_events=False, paths=None):
for depth, name in enumerate(path, 1):
if name not in current_tree:
current_tree[name] = OrderedDict()
if depth == len(path) and ((paths is None and leaf) or (paths is not None and path in paths)):
if depth == len(path) and (
(paths is None and leaf) or (paths is not None and path in paths)
):
# tree measurements have key None, avoiding name conflict
if show_events:
for event in events:
Expand Down

0 comments on commit 54b554b

Please sign in to comment.