TensorFlow, PyTorch, MindSpore, Paddle, and Caffe: check whether the GPU is available and get GPU info

import tensorflow as tf
# Check whether a GPU is available
flag = tf.test.is_gpu_available()
if flag:
    # Query the GPU device name
    print("CUDA is available")
    gpu_device_name = tf.test.gpu_device_name()
    print("GPU device: ", gpu_device_name)
else:
    print("CUDA is not available")
import torch
# Check whether CUDA is available to PyTorch
flag = torch.cuda.is_available()
if flag:
    print("CUDA is available")
    print("GPU model: ", torch.cuda.get_device_name())
else:
    print("CUDA is not available")

ngpu = 1
# Decide which device we want to run on
device = torch.device("cuda:0" if (torch.cuda.is_available() and ngpu > 0) else "cpu")
print(device)
print(torch.cuda.get_device_name(0))   # name of GPU 0
print(torch.rand(3, 3).cuda())         # allocate a tensor on the GPU to confirm it works
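
A few more PyTorch calls that are often useful when probing the GPU; this is a minimal sketch that only queries device properties when CUDA is actually available:

```
import torch

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("visible GPUs:", torch.cuda.device_count())

if torch.cuda.is_available():
    props = torch.cuda.get_device_properties(0)
    print(props.name, props.total_memory // 1024**2, "MiB")

# Move a tensor to the chosen device; this also works on a CPU-only machine
x = torch.ones(3, 3).to(device)
print(x.device)
```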

# MindSpore GPU test

import numpy as np
from mindspore import Tensor
from mindspore.ops import functional as F
import mindspore.context as context

# Run operators on the GPU backend
context.set_context(device_target="GPU")
x = Tensor(np.ones([1, 3, 3, 4]).astype(np.float32))
y = Tensor(np.ones([1, 3, 3, 4]).astype(np.float32))
print(F.tensor_add(x, y))

Expected output:

```
[[[[2. 2. 2. 2.]
   [2. 2. 2. 2.]
   [2. 2. 2. 2.]]

  [[2. 2. 2. 2.]
   [2. 2. 2. 2.]
   [2. 2. 2. 2.]]

  [[2. 2. 2. 2.]
   [2. 2. 2. 2.]
   [2. 2. 2. 2.]]]]
```
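
If the installed MindSpore package has no GPU backend, `set_context(device_target="GPU")` usually raises an error, so a hedged sketch of a GPU-first check with a CPU fallback (using only `mindspore.context`) could look like this; newer MindSpore releases also provide `mindspore.run_check()` for a quick installation check:

```
import numpy as np
from mindspore import Tensor
import mindspore.context as context

# Try the GPU backend first and fall back to CPU if it is unavailable
try:
    context.set_context(device_target="GPU")
except (ValueError, RuntimeError):
    context.set_context(device_target="CPU")

print("device_target:", context.get_context("device_target"))
x = Tensor(np.ones([2, 2]).astype(np.float32))
print(x + x)   # run a tiny computation on the selected backend
```
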
# Check whether Paddle can use the GPU
import paddle
print(paddle.fluid.is_compiled_with_cuda())   # True if this build has CUDA support
paddle.utils.run_check()                      # runs a small sanity check and prints the result
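
In Paddle 2.x the `fluid` namespace is deprecated; an equivalent check with the top-level API, assuming a recent 2.x release, might be:

```
import paddle

print(paddle.is_compiled_with_cuda())        # built with CUDA support?
print(paddle.device.cuda.device_count())     # number of visible GPUs
print(paddle.device.get_device())            # e.g. "gpu:0" or "cpu"
```
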
import caffe
caffe.set_mode_gpu()        # switch Caffe to GPU mode
print(caffe.__version__)
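
To pick a specific card, pycaffe also exposes `caffe.set_device`; a minimal sketch:

```
import caffe

caffe.set_device(0)   # use GPU 0
caffe.set_mode_gpu()
print("Caffe", caffe.__version__, "running in GPU mode")
```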

