Commit
Add apple silicon GPU Acceleration Support ("mps") (#335)
* Add apple silicon GPU Acceleration Support

* format fix

* update

* update

* update

* Fix module not found error

---------

Co-authored-by: rusty1s <[email protected]>
NripeshN and rusty1s committed Jul 19, 2023
1 parent 40693ab commit 14781cb
Showing 2 changed files with 13 additions and 5 deletions.
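In short, the commit teaches both the benchmark script and the test helpers to fall back to Apple's Metal Performance Shaders ("mps") backend when CUDA is not available. A minimal sketch of that device-selection pattern, assuming nothing beyond stock PyTorch (the tensor at the end is purely illustrative):

    import torch

    # Prefer CUDA, fall back to Apple-silicon MPS, otherwise stay on CPU.
    if torch.cuda.is_available():
        device = torch.device('cuda:0')
    elif torch.backends.mps.is_available():
        device = torch.device('mps')
    else:
        device = torch.device('cpu')

    x = torch.randn(8, 8, device=device)  # illustrative tensor on the chosen device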
16 changes: 11 additions & 5 deletions benchmark/main.py
@@ -1,13 +1,13 @@
-import time
-import os.path as osp
+import argparse
 import itertools
+import os.path as osp
+import time
 
-import argparse
-import wget
 import torch
+import wget
 from scipy.io import loadmat
 
 from torch_scatter import scatter_add
 
 from torch_sparse.tensor import SparseTensor
 
 short_rows = [
@@ -62,6 +62,9 @@ def time_func(func, x):
     try:
         if torch.cuda.is_available():
             torch.cuda.synchronize()
+        elif torch.backends.mps.is_available():
+            import torch.mps
+            torch.mps.synchronize()
         t = time.perf_counter()
 
         if not args.with_backward:
@@ -77,6 +80,9 @@ def time_func(func, x):
 
         if torch.cuda.is_available():
             torch.cuda.synchronize()
+        elif torch.backends.mps.is_available():
+            import torch.mps
+            torch.mps.synchronize()
         return time.perf_counter() - t
     except RuntimeError as e:
         if 'out of memory' not in str(e):
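Both hunks above apply the same synchronize-before-reading-the-clock pattern, since GPU kernels are launched asynchronously. A hedged sketch of how that pattern could be factored out; the helper name synchronize() is not part of the benchmark, and the CPU branch needs no barrier because CPU ops run synchronously:

    import torch


    def synchronize() -> None:
        # Block until all queued GPU work finishes, so that
        # time.perf_counter() measures real execution time.
        if torch.cuda.is_available():
            torch.cuda.synchronize()
        elif torch.backends.mps.is_available():
            import torch.mps
            torch.mps.synchronize()
        # CPU ops are synchronous; nothing to wait for.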
2 changes: 2 additions & 0 deletions torch_sparse/testing.py
@@ -16,6 +16,8 @@
 devices = [torch.device('cpu')]
 if torch.cuda.is_available():
     devices += [torch.device('cuda:0')]
+if torch.backends.mps.is_available():
+    devices += [torch.device('mps')]
 
 
 def tensor(x: Any, dtype: torch.dtype, device: torch.device):
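With 'mps' appended to torch_sparse.testing.devices, any test that parametrizes over that list now also runs on Apple-silicon GPUs. A hypothetical pytest case for illustration only; the test name and body are not from the repository, and it assumes tensor() returns a torch.Tensor built from its arguments, as its signature above suggests:

    import pytest
    import torch

    from torch_sparse.testing import devices, tensor


    @pytest.mark.parametrize('device', devices)
    def test_runs_on_every_device(device):
        # Build a small tensor on each available device and do a trivial reduction.
        x = tensor([1.0, 2.0, 3.0], torch.float, device)
        assert x.device.type == device.type
        assert float(x.sum()) == 6.0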
