doc(pytorch): update docs/pytorch.md (#649)

Co-authored-by: zhulin.zzz <zhul289@chinaunicom.cn> afbe61aebc
This commit is contained in:
jaywcjlove
2024-05-13 08:34:45 +00:00
parent f226f242f4
commit 9910c6d135
5 changed files with 93 additions and 62 deletions

View File

@ -35,7 +35,7 @@
备忘清单为您提供了 <a href="https://pytorch.org/">Pytorch</a> 基本语法和初步应用参考</p>
</div></header><div class="menu-tocs"><div class="menu-btn"><svg aria-hidden="true" fill="currentColor" height="1em" width="1em" viewBox="0 0 16 16" version="1.1" data-view-component="true">
<path fill-rule="evenodd" d="M2 4a1 1 0 100-2 1 1 0 000 2zm3.75-1.5a.75.75 0 000 1.5h8.5a.75.75 0 000-1.5h-8.5zm0 5a.75.75 0 000 1.5h8.5a.75.75 0 000-1.5h-8.5zm0 5a.75.75 0 000 1.5h8.5a.75.75 0 000-1.5h-8.5zM3 8a1 1 0 11-2 0 1 1 0 012 0zm-1 6a1 1 0 100-2 1 1 0 000 2z"></path>
</svg></div><div class="menu-modal"><a aria-hidden="true" class="leve2 tocs-link" data-num="2" href="#入门">入门</a><a aria-hidden="true" class="leve3 tocs-link" data-num="3" href="#介绍">介绍</a><a aria-hidden="true" class="leve3 tocs-link" data-num="3" href="#认识-pytorch">认识 Pytorch</a><a aria-hidden="true" class="leve3 tocs-link" data-num="3" href="#创建一个全零矩阵">创建一个全零矩阵</a><a aria-hidden="true" class="leve3 tocs-link" data-num="3" href="#数据创建张量">数据创建张量</a><a aria-hidden="true" class="leve2 tocs-link" data-num="2" href="#pytorch-的基本语法">Pytorch 的基本语法</a><a aria-hidden="true" class="leve3 tocs-link" data-num="3" href="#加法操作1">加法操作(1)</a><a aria-hidden="true" class="leve3 tocs-link" data-num="3" href="#加法操作2">加法操作(2)</a><a aria-hidden="true" class="leve3 tocs-link" data-num="3" href="#加法操作3">加法操作(3)</a><a aria-hidden="true" class="leve3 tocs-link" data-num="3" href="#加法操作4">加法操作(4)</a><a aria-hidden="true" class="leve3 tocs-link" data-num="3" href="#张量操作">张量操作</a><a aria-hidden="true" class="leve3 tocs-link" data-num="3" href="#张量形状">张量形状</a><a aria-hidden="true" class="leve3 tocs-link" data-num="3" href="#取张量元素">取张量元素</a><a aria-hidden="true" class="leve3 tocs-link" data-num="3" href="#torch-tensor-和-numpy-array互换">Torch Tensor 和 Numpy array互换</a><a aria-hidden="true" class="leve3 tocs-link" data-num="3" href="#torch-tensor-转换为-numpy-array">Torch Tensor 转换为 Numpy array</a><a aria-hidden="true" class="leve3 tocs-link" data-num="3" href="#numpy-array转换为torch-tensor">Numpy array转换为Torch Tensor</a><a aria-hidden="true" class="leve3 tocs-link" data-num="3" href="#squeeze函数">squeeze函数</a><a aria-hidden="true" class="leve3 tocs-link" data-num="3" href="#unsqueeze函数">unsqueeze函数</a><a aria-hidden="true" class="leve2 tocs-link" data-num="2" href="#导入-imports">导入 Imports</a><a aria-hidden="true" class="leve3 tocs-link" data-num="3" href="#一般">一般</a><a aria-hidden="true" class="leve3 tocs-link" data-num="3" href="#神经网络-api">神经网络 API</a><a aria-hidden="true" class="leve3 tocs-link" 
data-num="3" href="#torchscript-和-jit">Torchscript 和 JIT</a><a aria-hidden="true" class="leve3 tocs-link" data-num="3" href="#onnx">ONNX</a><a aria-hidden="true" class="leve3 tocs-link" data-num="3" href="#vision">Vision</a><a aria-hidden="true" class="leve3 tocs-link" data-num="3" href="#分布式训练">分布式训练</a><a aria-hidden="true" class="leve2 tocs-link" data-num="2" href="#另见">另见</a></div></div><div class="h1wrap-body"><div class="wrap h2body-exist"><div class="wrap-header h2wrap"><h2 id="入门"><a aria-hidden="true" tabindex="-1" href="#入门"><span class="icon icon-link"></span></a>入门</h2><div class="wrap-body">
</svg></div><div class="menu-modal"><a aria-hidden="true" class="leve2 tocs-link" data-num="2" href="#入门">入门</a><a aria-hidden="true" class="leve3 tocs-link" data-num="3" href="#介绍">介绍</a><a aria-hidden="true" class="leve3 tocs-link" data-num="3" href="#认识-pytorch">认识 Pytorch</a><a aria-hidden="true" class="leve3 tocs-link" data-num="3" href="#创建一个全零矩阵">创建一个全零矩阵</a><a aria-hidden="true" class="leve3 tocs-link" data-num="3" href="#数据创建张量">数据创建张量</a><a aria-hidden="true" class="leve2 tocs-link" data-num="2" href="#pytorch-的基本语法">Pytorch 的基本语法</a><a aria-hidden="true" class="leve3 tocs-link" data-num="3" href="#加法操作1">加法操作(1)</a><a aria-hidden="true" class="leve3 tocs-link" data-num="3" href="#加法操作2">加法操作(2)</a><a aria-hidden="true" class="leve3 tocs-link" data-num="3" href="#加法操作3">加法操作(3)</a><a aria-hidden="true" class="leve3 tocs-link" data-num="3" href="#加法操作4">加法操作(4)</a><a aria-hidden="true" class="leve3 tocs-link" data-num="3" href="#张量操作">张量操作</a><a aria-hidden="true" class="leve3 tocs-link" data-num="3" href="#张量形状">张量形状</a><a aria-hidden="true" class="leve3 tocs-link" data-num="3" href="#取张量元素">取张量元素</a><a aria-hidden="true" class="leve3 tocs-link" data-num="3" href="#torch-tensor-和-numpy-array互换">Torch Tensor 和 Numpy array互换</a><a aria-hidden="true" class="leve3 tocs-link" data-num="3" href="#torch-tensor-转换为-numpy-array">Torch Tensor 转换为 Numpy array</a><a aria-hidden="true" class="leve3 tocs-link" data-num="3" href="#numpy-array转换为torch-tensor">Numpy array转换为Torch Tensor</a><a aria-hidden="true" class="leve3 tocs-link" data-num="3" href="#squeeze函数">squeeze函数</a><a aria-hidden="true" class="leve3 tocs-link" data-num="3" href="#unsqueeze函数">unsqueeze函数</a><a aria-hidden="true" class="leve2 tocs-link" data-num="2" href="#cuda-相关">Cuda 相关</a><a aria-hidden="true" class="leve3 tocs-link" data-num="3" href="#检查-cuda-是否可用">检查 Cuda 是否可用</a><a aria-hidden="true" class="leve3 tocs-link" data-num="3" href="#列出-gpu-设备">列出 GPU 设备</a><a aria-hidden="true" class="leve3 
tocs-link" data-num="3" href="#将模型张量等数据在-gpu-和内存之间进行搬运">将模型、张量等数据在 GPU 和内存之间进行搬运</a><a aria-hidden="true" class="leve2 tocs-link" data-num="2" href="#导入-imports">导入 Imports</a><a aria-hidden="true" class="leve3 tocs-link" data-num="3" href="#一般">一般</a><a aria-hidden="true" class="leve3 tocs-link" data-num="3" href="#神经网络-api">神经网络 API</a><a aria-hidden="true" class="leve3 tocs-link" data-num="3" href="#torchscript-和-jit">Torchscript 和 JIT</a><a aria-hidden="true" class="leve3 tocs-link" data-num="3" href="#onnx">ONNX</a><a aria-hidden="true" class="leve3 tocs-link" data-num="3" href="#vision">Vision</a><a aria-hidden="true" class="leve3 tocs-link" data-num="3" href="#分布式训练">分布式训练</a><a aria-hidden="true" class="leve2 tocs-link" data-num="2" href="#另见">另见</a></div></div><div class="h1wrap-body"><div class="wrap h2body-exist"><div class="wrap-header h2wrap"><h2 id="入门"><a aria-hidden="true" tabindex="-1" href="#入门"><span class="icon icon-link"></span></a>入门</h2><div class="wrap-body">
</div></div><div class="h2wrap-body"><div class="wrap h3body-not-exist"><div class="wrap-header h3wrap"><h3 id="介绍"><a aria-hidden="true" tabindex="-1" href="#介绍"><span class="icon icon-link"></span></a>介绍</h3><div class="wrap-body">
<ul>
<li><a href="https://pytorch.org/">Pytorch 官网</a> <em>(pytorch.org)</em></li>
@ -175,6 +175,35 @@
</span><span class="code-line"><span class="token operator">>></span><span class="token operator">></span> torch<span class="token punctuation">.</span>unsqueeze<span class="token punctuation">(</span>x<span class="token punctuation">,</span> <span class="token operator">-</span><span class="token number">1</span><span class="token punctuation">)</span><span class="token punctuation">.</span>shape <span class="token comment"># 既可以是函数,也可以是方法</span>
</span><span class="code-line">torch<span class="token punctuation">.</span>Size<span class="token punctuation">(</span><span class="token punctuation">[</span><span class="token number">2</span><span class="token punctuation">,</span> <span class="token number">28</span><span class="token punctuation">,</span> <span class="token number">1</span><span class="token punctuation">]</span><span class="token punctuation">)</span>
</span></code></pre>
</div></div></div></div></div><div class="wrap h2body-exist"><div class="wrap-header h2wrap"><h2 id="cuda-相关"><a aria-hidden="true" tabindex="-1" href="#cuda-相关"><span class="icon icon-link"></span></a>Cuda 相关</h2><div class="wrap-body">
</div></div><div class="h2wrap-body"><div class="wrap h3body-not-exist"><div class="wrap-header h3wrap"><h3 id="检查-cuda-是否可用"><a aria-hidden="true" tabindex="-1" href="#检查-cuda-是否可用"><span class="icon icon-link"></span></a>检查 Cuda 是否可用</h3><div class="wrap-body">
<pre class="language-python"><code class="language-python code-highlight"><span class="code-line"><span class="token operator">>></span><span class="token operator">></span> <span class="token keyword">import</span> torch<span class="token punctuation">.</span>cuda
</span><span class="code-line"><span class="token operator">>></span><span class="token operator">></span> torch<span class="token punctuation">.</span>cuda<span class="token punctuation">.</span>is_available<span class="token punctuation">(</span><span class="token punctuation">)</span>
</span><span class="code-line"><span class="token boolean">True</span>
</span></code></pre>
</div></div></div><div class="wrap h3body-not-exist"><div class="wrap-header h3wrap"><h3 id="列出-gpu-设备"><a aria-hidden="true" tabindex="-1" href="#列出-gpu-设备"><span class="icon icon-link"></span></a>列出 GPU 设备</h3><div class="wrap-body">
<pre class="language-python"><code class="language-python code-highlight"><span class="code-line"><span class="token keyword">import</span> torch
</span><span class="code-line">device_count <span class="token operator">=</span> torch<span class="token punctuation">.</span>cuda<span class="token punctuation">.</span>device_count<span class="token punctuation">(</span><span class="token punctuation">)</span>
</span><span class="code-line"><span class="token keyword">print</span><span class="token punctuation">(</span><span class="token string">"CUDA 设备"</span><span class="token punctuation">)</span>
</span><span class="code-line"><span class="token keyword">for</span> i <span class="token keyword">in</span> <span class="token builtin">range</span><span class="token punctuation">(</span>device_count<span class="token punctuation">)</span><span class="token punctuation">:</span>
</span><span class="code-line"> device_name <span class="token operator">=</span> torch<span class="token punctuation">.</span>cuda<span class="token punctuation">.</span>get_device_name<span class="token punctuation">(</span>i<span class="token punctuation">)</span>
</span><span class="code-line"> total_memory <span class="token operator">=</span> torch<span class="token punctuation">.</span>cuda<span class="token punctuation">.</span>get_device_properties<span class="token punctuation">(</span>i<span class="token punctuation">)</span><span class="token punctuation">.</span>total_memory <span class="token operator">/</span> <span class="token punctuation">(</span><span class="token number">1024</span> <span class="token operator">**</span> <span class="token number">3</span><span class="token punctuation">)</span>
</span><span class="code-line"> <span class="token keyword">print</span><span class="token punctuation">(</span><span class="token string-interpolation"><span class="token string">f"├── 设备 </span><span class="token interpolation"><span class="token punctuation">{</span>i<span class="token punctuation">}</span></span><span class="token string">: </span><span class="token interpolation"><span class="token punctuation">{</span>device_name<span class="token punctuation">}</span></span><span class="token string">, 容量: </span><span class="token interpolation"><span class="token punctuation">{</span>total_memory<span class="token punctuation">:</span><span class="token format-spec">.2f</span><span class="token punctuation">}</span></span><span class="token string"> GiB"</span></span><span class="token punctuation">)</span>
</span><span class="code-line"><span class="token keyword">print</span><span class="token punctuation">(</span><span class="token string">"└── (结束)"</span><span class="token punctuation">)</span>
</span></code></pre>
</div></div></div><div class="wrap h3body-not-exist"><div class="wrap-header h3wrap"><h3 id="将模型张量等数据在-gpu-和内存之间进行搬运"><a aria-hidden="true" tabindex="-1" href="#将模型张量等数据在-gpu-和内存之间进行搬运"><span class="icon icon-link"></span></a>将模型、张量等数据在 GPU 和内存之间进行搬运</h3><div class="wrap-body">
<pre class="language-python"><code class="language-python code-highlight"><span class="code-line"><span class="token keyword">import</span> torch
</span><span class="code-line"><span class="token comment"># Replace 0 with your GPU device index, or use "cuda" directly.</span>
</span><span class="code-line">device <span class="token operator">=</span> <span class="token string">"cuda:0"</span>
</span><span class="code-line"><span class="token comment"># Move to GPU</span>
</span><span class="code-line">tensor_m <span class="token operator">=</span> torch<span class="token punctuation">.</span>tensor<span class="token punctuation">(</span><span class="token punctuation">[</span><span class="token number">1</span><span class="token punctuation">,</span> <span class="token number">2</span><span class="token punctuation">,</span> <span class="token number">3</span><span class="token punctuation">]</span><span class="token punctuation">)</span>
</span><span class="code-line">tensor_g <span class="token operator">=</span> tensor_m<span class="token punctuation">.</span>to<span class="token punctuation">(</span>device<span class="token punctuation">)</span>
</span><span class="code-line">model_m <span class="token operator">=</span> torch<span class="token punctuation">.</span>nn<span class="token punctuation">.</span>Linear<span class="token punctuation">(</span><span class="token number">1</span><span class="token punctuation">,</span> <span class="token number">1</span><span class="token punctuation">)</span>
</span><span class="code-line">model_g <span class="token operator">=</span> model_m<span class="token punctuation">.</span>to<span class="token punctuation">(</span>device<span class="token punctuation">)</span>
</span><span class="code-line"><span class="token comment"># Move back.</span>
</span><span class="code-line">tensor_m <span class="token operator">=</span> tensor_g<span class="token punctuation">.</span>cpu<span class="token punctuation">(</span><span class="token punctuation">)</span>
</span><span class="code-line">model_m <span class="token operator">=</span> model_g<span class="token punctuation">.</span>cpu<span class="token punctuation">(</span><span class="token punctuation">)</span>
</span></code></pre>
</div></div></div></div></div><div class="wrap h2body-exist"><div class="wrap-header h2wrap"><h2 id="导入-imports"><a aria-hidden="true" tabindex="-1" href="#导入-imports"><span class="icon icon-link"></span></a>导入 Imports</h2><div class="wrap-body">
</div></div><div class="h2wrap-body"><div class="wrap h3body-not-exist"><div class="wrap-header h3wrap"><h3 id="一般"><a aria-hidden="true" tabindex="-1" href="#一般"><span class="icon icon-link"></span></a>一般</h3><div class="wrap-body">
<pre class="wrap-text"><code class="language-python code-highlight"><span class="code-line"><span class="token comment"># 根包</span>