Skip to content

Commit

Permalink
Move blogs and publications page to top level (#3207)
Browse files Browse the repository at this point in the history
  • Loading branch information
jingxu10 authored Aug 20, 2024
1 parent 4605486 commit c477c55
Show file tree
Hide file tree
Showing 6 changed files with 150 additions and 31 deletions.
10 changes: 5 additions & 5 deletions .github/workflows/publish.yml
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@ jobs:
echo -e "import argparse\nfrom os import listdir\nfrom os.path import isdir, join\nimport re\nclass cDirs:\n def __init__(self, str_ver):\n self.major = 0\n self.minor = 0\n self.patch = 0\n self.post = ''\n matches = re.match('(\d+)\.(\d+)\.(\d+)(.*)', str_ver)\n if matches and len(matches.groups()) == 4:\n self.major = int(matches.group(1))\n self.minor = int(matches.group(2))\n self.patch = int(matches.group(3))\n self.post = matches.group(4)\n def __repr__(self):\n return f'{self.major}.{self.minor}.{self.patch}{self.post}'\nparser = argparse.ArgumentParser()\nparser.add_argument('path')\nargs = parser.parse_args()\ndirs = [cDirs(d) for d in listdir(args.path) if isdir(join(args.path, d)) and d != '.git']\ndirs.sort(key=lambda x: (x.major, x.minor, x.patch, x.post), reverse=True)\nfor d in dirs:\n if str(d) != '0.0.0':\n print(d)" > versions_scan.py
LATEST_CPU=""
LATEST_XPU=""
sed -i "/<li class=\"toctree-l1\">/d" index.html
sed -i "/<li class=\"toctree-l1\">/d" navigation.html
txt="\ <li class=\"toctree-l1\"><a class=\"reference internal\" href=\"cpu/latest\">latest</a></li>"
while read -r line;
do
Expand All @@ -28,9 +28,9 @@ jobs:
fi
txt="${txt}\n <li class=\"toctree-l1\"><a class=\"reference internal\" href=\"cpu/${line}\">v${line}</a></li>"
done < <(python versions_scan.py ./cpu)
ln=$(grep "<ul id=\"ul_cpu\">" -n index.html | cut -d ":" -f 1)
ln=$(grep "<ul id=\"ul_cpu\">" -n navigation.html | cut -d ":" -f 1)
ln=$((ln+1))
sed -i "${ln} i ${txt}" index.html
sed -i "${ln} i ${txt}" navigation.html
txt="\ <li class=\"toctree-l1\"><a class=\"reference internal\" href=\"xpu/latest\">latest</a></li>"
while read -r line;
do
Expand All @@ -39,9 +39,9 @@ jobs:
fi
txt="${txt}\n <li class=\"toctree-l1\"><a class=\"reference internal\" href=\"xpu/${line}\">v${line}</a></li>"
done < <(python versions_scan.py ./xpu)
ln=$(grep "<ul id=\"ul_gpu\">" -n index.html | cut -d ":" -f 1)
ln=$(grep "<ul id=\"ul_gpu\">" -n navigation.html | cut -d ":" -f 1)
ln=$((ln+1))
sed -i "${ln} i ${txt}" index.html
sed -i "${ln} i ${txt}" navigation.html
cd cpu
if [ -L latest ]; then
rm latest
Expand Down
3 changes: 2 additions & 1 deletion _css/theme.css

Large diffs are not rendered by default.

3 changes: 3 additions & 0 deletions _scripts/load_navi.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
// Fetch the shared sidebar fragment (navigation.html) and inject it
// into the #nav-side container once the DOM is ready.
jQuery(function () {
  jQuery("#nav-side").load('navigation.html');
});
110 changes: 110 additions & 0 deletions blogs.html
Original file line number Diff line number Diff line change
@@ -0,0 +1,110 @@
<!DOCTYPE html>
<html class="writer-html5" lang="en" >
<head>
<meta charset="utf-8" /><meta name="generator" content="Docutils 0.17.1: http://docutils.sourceforge.net/" />
<meta content="This website introduces Intel® Extension for PyTorch*" name="description" />
<meta content="Intel optimization, PyTorch, Intel® Extension for PyTorch*, GPU, discrete GPU, Intel discrete GPU" name="keywords" />

<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Welcome to Intel® Extension for PyTorch* Documentation!</title>
<link rel="stylesheet" href="_css/theme.css" type="text/css" />
<link rel="stylesheet" href="_css/custom.css" type="text/css" />
<script src="_scripts/jquery.js"></script>
<script src="_scripts/theme.js"></script>
<script src="_scripts/load_navi.js"></script>
<script type="text/javascript">
// Configure TMS settings
window.wapProfile = 'profile-microsite'; // This is mapped by WAP authorize value
window.wapLocalCode = 'us-en'; // Dynamically set per localized site, see mapping table for values
window.wapSection = "intel-extension-for-pytorch"; // WAP team will give you a unique section for your site
window.wapEnv = 'prod'; // environment to be use in Adobe Tags.
var wapSinglePage = false; // Include this variable only if your site is a single page application, such as one developed with the React framework
// Load TMS
(() => {
let url = 'https://www.intel.com/content/dam/www/global/wap/main/wap-microsite.js';
let po = document.createElement('script'); po.type = 'text/javascript'; po.async = true; po.src = url;
let s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(po, s);
}) ();
</script>
</head>

<body class="wy-body-for-nav">
<div class="wy-grid-for-nav">
<nav data-toggle="wy-nav-shift" class="wy-nav-side">
<div class="wy-side-scroll" id="nav-side">
</div>
</nav>

<section data-toggle="wy-nav-shift" class="wy-nav-content-wrap">
<nav class="wy-nav-top" aria-label="Mobile navigation menu" >
<i data-toggle="wy-nav-top" class="fa fa-bars"></i>
<a href="#">intel_extension_for_pytorch</a>
</nav>

<div class="wy-nav-content">
<div class="rst-content">
<div role="main" class="document" itemscope="itemscope" itemtype="http://schema.org/Article">
<div itemprop="articleBody">
<section id="blogs-publications">
<h1>Blogs &amp; Publications<a class="headerlink" href="#blogs-publications" title="Link to this heading"></a></h1>
<ul class="simple">
<li><p><a class="reference external" href="https://www.intel.com/content/www/us/en/developer/articles/news/llama2.html">Accelerate Llama 2 with Intel AI Hardware and Software Optimizations, Jul 2023</a></p></li>
<li><p><a class="reference external" href="https://www.intel.com/content/www/us/en/developer/articles/technical/accelerate-pytorch-training-inference-on-amx.html">Accelerate PyTorch* Training and Inference Performance using Intel® AMX, Jul 2023</a></p></li>
<li><p><a class="reference external" href="https://networkbuilders.intel.com/solutionslibrary/intel-deep-learning-boost-intel-dl-boost-improve-inference-performance-of-hugging-face-bert-base-model-in-google-cloud-platform-gcp-technology-guide">Intel® Deep Learning Boost (Intel® DL Boost) - Improve Inference Performance of Hugging Face BERT Base Model in Google Cloud Platform (GCP) Technology Guide, Apr 2023</a></p></li>
<li><p><a class="reference external" href="https://www.youtube.com/watch?v=Id-rE2Q7xZ0&amp;t=1s">Get Started with Intel® Extension for PyTorch* on GPU | Intel Software, Mar 2023</a></p></li>
<li><p><a class="reference external" href="https://www.intel.com/content/www/us/en/developer/articles/technical/accelerate-pytorch-int8-inf-with-new-x86-backend.html">Accelerate PyTorch* INT8 Inference with New “X86” Quantization Backend on X86 CPUs, Mar 2023</a></p></li>
<li><p><a class="reference external" href="https://huggingface.co/blog/intel-sapphire-rapids">Accelerating PyTorch Transformers with Intel Sapphire Rapids, Part 1, Jan 2023</a></p></li>
<li><p><a class="reference external" href="https://networkbuilders.intel.com/solutionslibrary/intel-deep-learning-boost-improve-inference-performance-of-bert-base-model-from-hugging-face-for-network-security-technology-guide">Intel® Deep Learning Boost - Improve Inference Performance of BERT Base Model from Hugging Face for Network Security Technology Guide, Jan 2023</a></p></li>
<li><p><a class="reference external" href="https://www.youtube.com/watch?v=066_Jd6cwZg">Scaling inference on CPUs with TorchServe, PyTorch Conference, Dec 2022</a></p></li>
<li><p><a class="reference external" href="https://www.youtube.com/watch?v=SE56wFXdvP4&amp;t=1s">What is New in Intel Extension for PyTorch, PyTorch Conference, Dec 2022</a></p></li>
<li><p><a class="reference external" href="https://www.pyg.org/ns-newsarticle-accelerating-pyg-on-intel-cpus">Accelerating PyG on Intel CPUs, Dec 2022</a></p></li>
<li><p><a class="reference external" href="https://www.oneapi.io/event-sessions/accelerating-pytorch-deep-learning-models-on-intel-xpus-2-ai-hpc-2022/">Accelerating PyTorch Deep Learning Models on Intel XPUs, Dec, 2022</a></p></li>
<li><p><a class="reference external" href="https://www.intel.com/content/www/us/en/developer/articles/technical/introducing-intel-extension-for-pytorch-for-gpus.html">Introducing the Intel® Extension for PyTorch* for GPUs, Dec 2022</a></p></li>
<li><p><a class="reference external" href="https://towardsdatascience.com/pytorch-stable-diffusion-using-hugging-face-and-intel-arc-77010e9eead6">PyTorch Stable Diffusion Using Hugging Face and Intel Arc, Nov 2022</a></p></li>
<li><p><a class="reference external" href="https://www.intel.com/content/www/us/en/developer/articles/technical/pytorch-1-13-new-potential-for-ai-developers.html">PyTorch 1.13: New Potential for AI Developers to Enhance Model Performance and Accuracy, Nov 2022</a></p></li>
<li><p><a class="reference external" href="https://medium.com/intel-analytics-software/easy-quantization-in-pytorch-using-fine-grained-fx-80be2c4bc2d6">Easy Quantization in PyTorch Using Fine-Grained FX, Sep 2022</a></p></li>
<li><p><a class="reference external" href="https://pytorch.org/blog/empowering-pytorch-on-intel-xeon-scalable-processors-with-bfloat16/">Empowering PyTorch on Intel® Xeon® Scalable processors with Bfloat16, Aug 2022</a></p></li>
<li><p><a class="reference external" href="https://pytorch.org/blog/accelerating-pytorch-vision-models-with-channels-last-on-cpu/">Accelerating PyTorch Vision Models with Channels Last on CPU, Aug 2022</a></p></li>
<li><p><a class="reference external" href="https://medium.com/intel-analytics-software/one-click-enable-intel-neural-compressor-features-in-pytorch-scripts-5d4e31f5a22b">One-Click Enabling of Intel Neural Compressor Features in PyTorch Scripts, Aug 2022</a></p></li>
<li><p><a class="reference external" href="https://www.intel.com/content/www/us/en/developer/articles/technical/increase-pytorch-inference-throughput-by-4x.html">Increase PyTorch Inference Throughput by 4x, Jul 2022</a></p></li>
<li><p><a class="reference external" href="https://medium.com/pytorch/pytorch-inference-acceleration-with-intel-neural-compressor-842ef4210d7d">PyTorch Inference Acceleration with Intel® Neural Compressor, Jun 2022</a></p></li>
<li><p><a class="reference external" href="https://medium.com/pytorch/accelerating-pytorch-with-intel-extension-for-pytorch-3aef51ea3722">Accelerating PyTorch with Intel® Extension for PyTorch, May 2022</a></p></li>
<li><p><a class="reference external" href="https://pytorch.org/tutorials/intermediate/torchserve_with_ipex.html">Grokking PyTorch Intel CPU performance from first principles (parts 1), Apr 2022</a></p></li>
<li><p><a class="reference external" href="https://pytorch.org/tutorials/intermediate/torchserve_with_ipex_2.html">Grokking PyTorch Intel CPU performance from first principles (parts 2), Apr 2022</a></p></li>
<li><p><a class="reference external" href="https://medium.com/pytorch/grokking-pytorch-intel-cpu-performance-from-first-principles-7e39694412db">Grokking PyTorch Intel CPU performance from first principles, Apr 2022</a></p></li>
<li><p><a class="reference external" href="https://community.intel.com/t5/Blogs/Tech-Innovation/Artificial-Intelligence-AI/KT-Optimizes-Performance-for-Personalized-Text-to-Speech/post/1337757">KT Optimizes Performance for Personalized Text-to-Speech, Nov 2021</a></p></li>
<li><p><a class="reference external" href="https://huggingface.co/blog/accelerating-pytorch">Accelerating PyTorch distributed fine-tuning with Intel technologies, Nov 2021</a></p></li>
<li><p><a class="reference external" href="https://huggingface.co/blog/bert-cpu-scaling-part-1">Scaling up BERT-like model Inference on modern CPU - parts 1, Apr 2021</a></p></li>
<li><p><a class="reference external" href="https://huggingface.co/blog/bert-cpu-scaling-part-2">Scaling up BERT-like model Inference on modern CPU - parts 2, Nov 2021</a></p></li>
<li><p><a class="reference external" href="https://www.intel.com/content/www/us/en/customer-spotlight/stories/naver-ocr-customer-story.html">NAVER: Low-Latency Machine-Learning Inference</a></p></li>
<li><p><a class="reference external" href="https://pytorch.org/tutorials/recipes/recipes/intel_extension_for_pytorch.html">Intel® Extensions for PyTorch, Feb 2021</a></p></li>
<li><p><a class="reference external" href="https://pytorch.medium.com/optimizing-dlrm-by-using-pytorch-with-oneccl-backend-9f85b8ef6929">Optimizing DLRM by using PyTorch with oneCCL Backend, Feb 2021</a></p></li>
<li><p><a class="reference external" href="https://medium.com/pytorch/accelerate-pytorch-with-ipex-and-onednn-using-intel-bf16-technology-dca5b8e6b58f">Accelerate PyTorch with IPEX and oneDNN using Intel BF16 Technology, Feb 2021</a> <em>Note</em>: APIs mentioned in it are deprecated.</p></li>
<li><p><a class="reference external" href="https://community.intel.com/t5/Blogs/Tech-Innovation/Artificial-Intelligence-AI/Intel-and-Facebook-Accelerate-PyTorch-Performance-with-3rd-Gen/post/1335659">Intel and Facebook Accelerate PyTorch Performance with 3rd Gen Intel® Xeon® Processors and Intel® Deep Learning Boost’s new BFloat16 capability, Jun 2020</a></p></li>
<li><p><a class="reference external" href="https://www.intel.com/content/www/us/en/developer/articles/case-study/intel-and-facebook-collaborate-to-boost-pytorch-cpu-performance.html">Intel and Facebook* collaborate to boost PyTorch* CPU performance, Apr 2019</a></p></li>
<li><p><a class="reference external" href="https://www.intel.com/content/www/us/en/developer/articles/technical/intel-and-facebook-collaborate-to-boost-caffe2-performance-on-intel-cpu-s.html">Intel and Facebook* Collaborate to Boost Caffe*2 Performance on Intel CPU’s, Apr 2017</a></p></li>
</ul>
</section>
</div>
</div>
<footer>
<hr/>
<div role="contentinfo">
<p>&#169; Copyright Intel(R).</p>
</div>
Built with <a href="https://www.sphinx-doc.org/">Sphinx</a> using a
<a href="https://github.com/readthedocs/sphinx_rtd_theme">theme</a>
provided by <a href="https://readthedocs.org">Read the Docs</a>.
<p></p><div><a href='https://www.intel.com/content/www/us/en/privacy/intel-cookie-notice.html' data-cookie-notice='true'>Cookies</a> <a href='https://www.intel.com/content/www/us/en/privacy/intel-privacy-notice.html'>| Privacy</a> <a href="/#" data-wap_ref="dns" id="wap_dns"><small>| Your Privacy Choices</small></a> <a href=https://www.intel.com/content/www/us/en/privacy/privacy-residents-certain-states.html data-wap_ref="nac" id="wap_nac"><small>| Notice at Collection</small></a> </div> <p></p> <div>&copy; Intel Corporation. Intel, the Intel logo, and other Intel marks are trademarks of Intel Corporation or its subsidiaries. Other names and brands may be claimed as the property of others. No license (express or implied, by estoppel or otherwise) to any intellectual property rights is granted by this document, with the sole exception that code included in this document is licensed subject to the Zero-Clause BSD open source license (OBSD), <a href='http://opensource.org/licenses/0BSD'>http://opensource.org/licenses/0BSD</a>. </div>
</footer>
</div>
</div>
</section>
</div>
<script>
jQuery(function () {
SphinxRtdTheme.Navigation.enable(true);
});
</script>
</body>
</html>
41 changes: 16 additions & 25 deletions index.html
Original file line number Diff line number Diff line change
Expand Up @@ -12,36 +12,27 @@
<script src="_scripts/jquery.js"></script>
<script src="_scripts/theme.js"></script>
<script src="_scripts/actions.js"></script>
<script type="text/javascript">
// Configure TMS settings
window.wapProfile = 'profile-microsite'; // This is mapped by WAP authorize value
window.wapLocalCode = 'us-en'; // Dynamically set per localized site, see mapping table for values
window.wapSection = "intel-extension-for-pytorch"; // WAP team will give you a unique section for your site
window.wapEnv = 'prod'; // environment to be use in Adobe Tags.
var wapSinglePage = false; // Include this variable only if your site is a single page application, such as one developed with the React framework
// Load TMS
(() => {
let url = 'https://www.intel.com/content/dam/www/global/wap/main/wap-microsite.js';
let po = document.createElement('script'); po.type = 'text/javascript'; po.async = true; po.src = url;
let s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(po, s);
}) ();
</script>
<script src="_scripts/load_navi.js"></script>
<script type="text/javascript">
// Configure TMS settings
window.wapProfile = 'profile-microsite'; // This is mapped by WAP authorize value
window.wapLocalCode = 'us-en'; // Dynamically set per localized site, see mapping table for values
window.wapSection = "intel-extension-for-pytorch"; // WAP team will give you a unique section for your site
window.wapEnv = 'prod'; // environment to be use in Adobe Tags.
var wapSinglePage = false; // Include this variable only if your site is a single page application, such as one developed with the React framework
// Load TMS
(() => {
let url = 'https://www.intel.com/content/dam/www/global/wap/main/wap-microsite.js';
let po = document.createElement('script'); po.type = 'text/javascript'; po.async = true; po.src = url;
let s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(po, s);
}) ();
</script>
</head>

<body class="wy-body-for-nav">
<div class="wy-grid-for-nav">
<nav data-toggle="wy-nav-shift" class="wy-nav-side">
<div class="wy-side-scroll">
<div class="wy-menu wy-menu-vertical" data-spy="affix" role="navigation" aria-label="Navigation menu">
<p id="version-pick"><span><b>Pick a version:</b></span></p>
<p><span><b>CPU</b></span></p>
<ul id="ul_cpu">
</ul>
<p class="menu-separator"></p>
<p<span><b>XPU/GPU</b></span></p>
<ul id="ul_gpu">
</ul>
</div>
<div class="wy-side-scroll" id="nav-side">
</div>
</nav>

Expand Down
14 changes: 14 additions & 0 deletions navigation.html
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
<!DOCTYPE html>
<!-- Shared sidebar fragment: injected into #nav-side by _scripts/load_navi.js.
     The empty ul_cpu / ul_gpu lists are populated with version links by the
     publish workflow (sed insertion after each <ul id=...> line). -->
<div class="wy-menu wy-menu-vertical" data-spy="affix" role="navigation" aria-label="Navigation menu">
<p class="btn-page"><span><b><a href="index.html">Homepage</a></b></span></p>
<p class="btn-page"><span><b><a href="blogs.html">Blogs &amp; Publications</a></b></span></p>
<p class="menu-separator"></p>
<p id="version-pick"><span><b>Pick a version:</b></span></p>
<p><span><b>CPU</b></span></p>
<ul id="ul_cpu">
</ul>
<p class="menu-separator"></p>
<p><span><b>XPU/GPU</b></span></p>
<ul id="ul_gpu">
</ul>
</div>

0 comments on commit c477c55

Please sign in to comment.