Commit 19746ed

Update documentations

actions-user committed Jul 29, 2024
1 parent 6f3ec48 commit 19746ed
Showing 35 changed files with 828 additions and 219 deletions.
30 changes: 24 additions & 6 deletions _modules/hippynn/custom_kernels.html
@@ -112,8 +112,16 @@ Source code for hippynn.custom_kernels
 except ImportError:
     pass

+try:
+    import triton
+
+    CUSTOM_KERNELS_AVAILABLE.append("triton")
+except ImportError:
+    pass
+
 if not CUSTOM_KERNELS_AVAILABLE:
-    warnings.warn("Numba or cupy not available: Custom Kernels will be disabled.")
+    warnings.warn(
+        "Triton, cupy and numba are not available: Custom kernels will be disabled and performance may be degraded.")

 CUSTOM_KERNELS_ACTIVE = False
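This hunk adds triton as a third optional backend probed at import time, alongside cupy and numba, and widens the fallback warning to name all three. A minimal usage sketch (assuming hippynn is installed; set_custom_kernels is the enclosing function whose body is diffed in the next hunk):

import hippynn.custom_kernels as ck

# Backends detected at import time; contents depend on the environment,
# e.g. ["numba", "cupy", "triton"].
print(ck.CUSTOM_KERNELS_AVAILABLE)

# Request a specific implementation; per the function below, an unavailable
# name raises RuntimeError rather than silently falling back.
ck.set_custom_kernels("triton")
print(ck.CUSTOM_KERNELS_ACTIVE)  # True once a custom implementation is active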
@@ -155,46 +163,56 @@ Source code for hippynn.custom_kernels
     Activate or deactivate custom kernels for interaction.

     :param active: If true, set custom kernels to the best available. If False, turn them off and default to pytorch.
-        If "numba" or "cupy", use those implementations explicitly. If "auto", use best available.
+        If "triton", "numba" or "cupy", use those implementations explicitly. If "auto", use best available.
     :return: None
     """
     global envsum, sensesum, featsum, CUSTOM_KERNELS_ACTIVE

     if isinstance(active, str):
         active = active.lower()

-    if active not in [True, False, "numba", "cupy", "pytorch", "auto"]:
+    if active not in [True, False, "triton", "numba", "cupy", "pytorch", "auto"]:
         raise ValueError(f"Unrecognized custom kernel implementation: {active}")

     active_map = {"auto": True, "pytorch": False}
     if not CUSTOM_KERNELS_AVAILABLE:
         if active == "auto" or active == "pytorch":
             active = False
         elif active:
-            raise RuntimeError("Numba or cupy was not found. Custom kernels are not available.")
+            raise RuntimeError(
+                "Triton, numba and cupy were not found. Custom kernels are not available, but they were required by library settings.")
     else:
         active = active_map.get(active, active)

     # Handle fallback to pytorch kernels.
     if not active:
         envsum = env_pytorch.envsum
         sensesum = env_pytorch.sensesum
         featsum = env_pytorch.featsum
         CUSTOM_KERNELS_ACTIVE = False
         return

     # Select custom kernel implementation

     if not CUSTOM_KERNELS_AVAILABLE:
         raise RuntimeError("Numba was not found. Custom kernels are not available.")

     if active is True:
-        if "cupy" in CUSTOM_KERNELS_AVAILABLE:
+        if "triton" in CUSTOM_KERNELS_AVAILABLE:
+            active = "triton"
+        elif "cupy" in CUSTOM_KERNELS_AVAILABLE:
             active = "cupy"
         else:
             active = "numba"

     if active not in CUSTOM_KERNELS_AVAILABLE:
         raise RuntimeError(f"Unavailable custom kernel implementation: {active}")

-    if active == "cupy":
+    if active == "triton":
+        from .env_triton import envsum as triton_envsum, sensesum as triton_sensesum, featsum as triton_featsum
+
+        envsum, sensesum, featsum = autograd_wrapper.wrap_envops(triton_envsum, triton_sensesum, triton_featsum)
+    elif active == "cupy":
         _check_numba()
         _check_cupy()
         from .env_cupy import cupy_envsum, cupy_featsum, cupy_sensesum
[Diff truncated; remaining changed files not shown.]
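In the selection step, this commit gives triton the highest priority when active is True, falling back to cupy and then numba, and routes the triton implementations through autograd_wrapper.wrap_envops so that the module-level envsum, sensesum, and featsum keep autograd support. A standalone sketch of that resolution order (resolve_backend is an illustrative name, not part of hippynn's API):

# Illustrative only: mirrors the preference order introduced by this commit
# (triton, then cupy, then numba).
def resolve_backend(available, requested=True):
    if requested is True:  # the "auto"/True case: best available backend
        for choice in ("triton", "cupy", "numba"):
            if choice in available:
                return choice
        raise RuntimeError("No custom kernel implementation is available.")
    if requested not in available:
        raise RuntimeError(f"Unavailable custom kernel implementation: {requested}")
    return requested

assert resolve_backend(["numba", "triton"]) == "triton"
assert resolve_backend(["numba", "cupy"]) == "cupy"
assert resolve_backend(["cupy"], requested="cupy") == "cupy"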
