Checking the CUDA Version

Using the nvidia-smi command

[Screenshot: nvidia-smi output showing the driver and CUDA version]
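The same check can also be run from inside Python. Below is a minimal sketch (assuming the NVIDIA driver is installed and nvidia-smi is on the PATH); note that the "CUDA Version" shown in the header of the report is the highest CUDA version the installed driver supports, not necessarily the version your PyTorch build will use.

import subprocess

# Minimal sketch: invoke nvidia-smi from Python and print its report.
# Assumes the NVIDIA driver is installed and nvidia-smi is on the PATH.
result = subprocess.run(["nvidia-smi"], capture_output=True, text=True)
print(result.stdout)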

Checking in the NVIDIA Control Panel

[Screenshot: NVIDIA Control Panel showing the CUDA version]

Installing PyTorch

Open the official PyTorch website and scroll down to the installation page.

[Screenshot: PyTorch homepage]

[Screenshot: installation command selector on the PyTorch website]

Select the options that match your setup (OS, package manager, language, and CUDA version), then copy the generated command and run it to install.

[Screenshot: selected configuration and the generated install command]
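The exact command depends on the configuration you select, so always copy the command the page generates for you. As an illustration only, the conda command for a CUDA 11.3 build looked like "conda install pytorch torchvision torchaudio cudatoolkit=11.3 -c pytorch" around the time these screenshots were taken.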

Test whether the installation succeeded:

import torch

# Print the installed PyTorch version and check whether CUDA is available
print(torch.__version__)
print(torch.cuda.is_available())

[Screenshot: PyTorch version and CUDA availability output]
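Beyond checking that CUDA is available, you can also confirm which CUDA and cuDNN versions the installed PyTorch build was compiled against; a small sketch:

import torch

# CUDA toolkit version this PyTorch build was compiled with (None for a CPU-only build)
print(torch.version.cuda)
# cuDNN version bundled with the build
print(torch.backends.cudnn.version())

As a further test, the following script compares the speed of a large matrix multiplication on the CPU and on the GPU: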

import torch
import time

# GPU acceleration test: time a large matrix multiplication on the CPU and on the GPU
print(torch.__version__)
print(torch.cuda.is_available())

a = torch.randn(10000, 1000)
b = torch.randn(1000, 10000)
print(a)
print(b)

# Multiply on the CPU
t0 = time.time()
c = torch.matmul(a, b)
t1 = time.time()
print(a.device, t1 - t0, c.norm(2))

# Move both tensors to the GPU
device = torch.device('cuda')
print(device)
a = a.to(device)
b = b.to(device)

# First multiplication on the GPU: includes one-time CUDA initialization overhead
t0 = time.time()
c = torch.matmul(a, b)
t2 = time.time()
print(a.device, t2 - t0, c.norm(2))

# Second multiplication on the GPU: reflects the actual GPU speed
t0 = time.time()
c = torch.matmul(a, b)
t2 = time.time()
print(a.device, t2 - t0, c.norm(2))

[Screenshot: CPU vs. GPU timing results]
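The first GPU multiplication is timed separately because it absorbs the one-time CUDA initialization cost. In addition, CUDA kernels are launched asynchronously, so time.time() alone can under-report GPU time; a more careful timing sketch calls torch.cuda.synchronize() before reading the clock:

import time
import torch

a = torch.randn(10000, 1000, device='cuda')
b = torch.randn(1000, 10000, device='cuda')

torch.matmul(a, b)        # warm-up run: triggers CUDA initialization
torch.cuda.synchronize()  # wait for all queued GPU work to finish

t0 = time.time()
c = torch.matmul(a, b)
torch.cuda.synchronize()  # ensure the kernel has completed before stopping the clock
t1 = time.time()
print('GPU matmul:', t1 - t0, 'seconds, norm =', c.norm(2).item())

One more quick check selects a device explicitly and creates a tensor directly on the GPU: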

import torch

flag = torch.cuda.is_available()
print(flag)

ngpu = 1
# Decide which device we want to run on
device = torch.device("cuda:0" if (torch.cuda.is_available() and ngpu > 0) else "cpu")
print(device)
# Name of the first GPU and a small tensor created on it
print(torch.cuda.get_device_name(0))
print(torch.rand(3, 3).cuda())

[Screenshot: device name and GPU tensor output]
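In practice, the usual pattern is to pick the device once and move both the model and its inputs onto it with .to(device), so the same script runs on either CPU or GPU. A minimal sketch (the tiny linear model here is only for illustration):

import torch
from torch import nn

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

model = nn.Linear(3, 1).to(device)   # toy model, for illustration only
x = torch.rand(4, 3, device=device)  # input created directly on the device
y = model(x)
print(y.device)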