diff --git a/.github/workflows/hello-world.yml b/.github/workflows/hello-world.yml index fcbe26e..4cdf516 100644 --- a/.github/workflows/hello-world.yml +++ b/.github/workflows/hello-world.yml @@ -50,164 +50,180 @@ # path: src/sample_file.txt # #path: ./output_files/sample_file.txt -name: Update index.html in GH Pages and generate global sitemap +# name: Update index.html in GH Pages and generate global sitemap -on: - push: - branches: - - main +# on: push -permissions: write-all +# permissions: write-all -jobs: +# jobs: - run-global-sitemap-script-and-update-gh-pages: +# run-global-sitemap-script-and-update-gh-pages: +# runs-on: ubuntu-latest + +# steps: + +# - name: Install Python +# uses: actions/setup-python@v5 +# with: +# python-version: '3.13' + +# - name: Install dependencies +# shell: bash +# run: | + +# python -m pip install --upgrade pip +# pip install requests + +# - name: Write script +# shell: bash +# run: | + +# cat > /tmp/catsitemap.py << "EOF" + +# import re +# import xml.etree.ElementTree as ET +# import requests +# from xml.dom import minidom + +# def extract_urls_and_headers(url: str) -> tuple: +# """This function extracts projects names and sitemap.xml urls for each project + +# Args: +# url (str): link to the .rst file of the PyAnsys documentation landing page + +# Returns: +# tuple: returns a tuple of list of project names and list of urls to projects' sitemap.xml files +# """ +# try: +# response = requests.get(url, timeout=10) +# except requests.exceptions.Timeout: +# print("Timed out while trying to get request") +# raise + +# content = response.text + +# # Extract section headers and URLs (modify regex based on your needs) +# project_names = [project_name.strip() for project_name in re.findall(r'\.\. 
grid-item-card:: ([\w\s-]+)', content)] +# urls = re.findall(r':link: (https://[\w./-]+)', content) + +# # Modify URLs +# updated_urls = [re.match(r"^(https:\/\/[^\/]+)", url).group(1) + "/sitemap.xml" for url in urls] + +# # Filter none existent URLS +# valid_project_names = [] +# valid_urls = [] +# for index, url in enumerate(updated_urls): +# if requests.get(url).status_code == 404: +# continue +# else: +# valid_project_names.append(project_names[index]) +# valid_urls.append(url) + +# return valid_project_names, valid_urls + +# def generate_sitemap_index(url: str) -> None: +# """This function generates a sitemap_index.xml file indexing other sitemap.xml files + +# Args: +# url (str): link to the .rst file of the PyAnsys documentation landing page +# """ + +# # Create the root element with namespace +# sitemap_index = ET.Element("sitemapindex", xmlns="http://www.sitemaps.org/schemas/sitemap/0.9") + +# # Build the list of urls +# urls = extract_urls_and_headers(url)[1] + +# # Create sitemap elements for each URL +# for url in urls: +# sitemap = ET.SubElement(sitemap_index, "sitemap") +# loc = ET.SubElement(sitemap, "loc") +# loc.text = url + +# # Format XML with indentation +# rough_string = ET.tostring(sitemap_index, 'utf-8') +# reparsed = minidom.parseString(rough_string) +# pretty_xml = reparsed.toprettyxml(indent=" ") + +# # Create the tree and write to XML file +# with open("globalsitemap.xml", "w") as f: +# f.write(pretty_xml) + + +# # URL of the .rst +# URL = "https://docs.pyansys.com/version/dev/_sources/index.rst.txt" +# generate_sitemap_index(URL) +# EOF + +# - name: Run above script +# working-directory: /tmp +# shell: bash +# run: | +# python catsitemap.py + +# - name: Checkout repository +# uses: actions/checkout@v4 +# #with: +# #ref: gh-pages + +# #- name: Replace 'version/stable' with 'version/dev' in index.html +# #run: | +# # Replace landing page with the dev version +# #cp version/dev/index.html index.html +# #sed -i 
's/href="\([^:"]*\)"/href="version\/dev\/\1"/g' index.html +# #sed -i 's/src="\([^:"]*\)"/src="version\/dev\/\1"/g' index.html +# # Replace "version/stable" with "version/dev" in the sitemap.xml +# #sed -i 's/version\/stable/version\/dev/g' sitemap.xml + +# - name: Copy globalsitemap.xml to root of gh-pages +# shell: bash +# run: | +# mv /tmp/globalsitemap.xml /home/runner/work/ci_cd/ci_cd/ + +# - name: "Commit changes" +# uses: EndBug/add-and-commit@v9 +# with: +# default_author: github_actions +# message: "testing action script, especially that file is added" +name: Custom GH Pages Post-processing + +on: push +# workflow_dispatch: +# workflow_call: + +env: + MAIN_PYTHON_VERSION: '3.13' + +jobs: + update-gh-pages: runs-on: ubuntu-latest steps: - - name: Install Python uses: actions/setup-python@v5 with: - python-version: '3.13' + python-version: ${{ env.MAIN_PYTHON_VERSION }} - - name: Install dependencies + - name: Install Python dependencies shell: bash run: | - python -m pip install --upgrade pip - pip install requests - - - name: Write script - shell: bash - run: | - - cat > /tmp/catsitemap.py << "EOF" - import re - import os - import xml.etree.ElementTree as ET - import requests - from xml.dom import minidom - - def download_file(url: str, folder_path: str, file_name: str) -> None: - """_summary_ + python -m pip install requests - Args: - url (str): _description_ - folder_path (str): _description_ - file_name (str): _description_. 
- """ - - # Full path for the downloaded file - file_path = os.path.join(folder_path, file_name) - - # Send the request - try: - response = requests.get(url, stream=True, timeout=10) - except: - print("Timed out while trying to get request") - raise - - # Write the file content to the specified location - with open(file_path, mode='wb') as file: - for chunk in response.iter_content(chunk_size=8192): - file.write(chunk) - - def extract_urls_and_headers(url: str) -> tuple: - """This function extracts projects names and sitemap.xml urls for each project - - Args: - url (str): link to the .rst file of the PyAnsys documentation landing page - - Returns: - tuple: returns a tuple of list of project names and list of urls to projects' sitemap.xml files - """ - try: - response = requests.get(url, timeout=10) - except requests.exceptions.Timeout: - print("Timed out while trying to get request") - raise - - content = response.text - - # Extract section headers and URLs (modify regex based on your needs) - project_names = [project_name.strip() for project_name in re.findall(r'\.\. grid-item-card:: ([\w\s-]+)', content)] - urls = re.findall(r':link: (https://[\w./-]+)', content) - - # Modify URLs - updated_urls = [re.match(r"^(https:\/\/[^\/]+)", url).group(1) + "/sitemap.xml" for url in urls] - - # Filter none existent URLS - valid_project_names = [] - valid_urls = [] - for index, url in enumerate(updated_urls): - if requests.get(url).status_code == 404: - continue - else: - valid_project_names.append(project_names[index]) - valid_urls.append(url) - - return valid_project_names, valid_urls - - - def generate_sitemap_index(project_names: list, folder_path: str, file_name: str) -> None: - """This function generates a sitemap_index.xml file indexing other sitemap.xml files - - Args: - url (list): list of the urls pointing to the location of the sitemaps - folder_path (str): _description_ - file_name (str): _description_. 
- """ - - # Create the root element with namespace - sitemap_index = ET.Element("sitemapindex", xmlns="http://www.sitemaps.org/schemas/sitemap/0.9") - - - # Create sitemap elements for each URL - for project in project_names: - # Modify the url to point to the correct gh-pages directory - modified_url = f"https://docs.pyansys.com/sitemap/{project.lower().replace(" ", "")}_sitemap.xml" - - sitemap = ET.SubElement(sitemap_index, "sitemap") - loc = ET.SubElement(sitemap, "loc") - loc.text = modified_url - - # Format XML with indentation - rough_string = ET.tostring(sitemap_index, 'utf-8') - reparsed = minidom.parseString(rough_string) - pretty_xml = reparsed.toprettyxml(indent=" ") - - # Create the tree and write to XML file - file_path = os.path.join(folder_path, file_name) - with open(file_path, "w") as f: - f.write(pretty_xml) - - # Inputs - folder_path = './sitemaps' - URL = "https://docs.pyansys.com/version/dev/_sources/index.rst.txt" - - # Create folder - os.makedirs(folder_path, exist_ok=True) - - # Get actual valid URLS and corresponding project names - project_names, project_urls = extract_urls_and_headers(URL) - - # Generate global sitemaps - file_name = "globalsitemap.xml" - generate_sitemap_index(project_names, folder_path, file_name) - - for index, url in enumerate(project_urls): - file_name = project_names[index].lower().replace(" ", "") + '_sitemap.xml' - download_file(url, folder_path, file_name) - EOF + - name: Checkout repository main branch + uses: actions/checkout@v4 + with: + ref: main - - name: Run above script + - name: Copy tools folder to /tmp and run sitemap script working-directory: /tmp shell: bash run: | - python catsitemap.py + cp -r /home/runner/work/ci_cd/ci_cd/tools/ . 
from pathlib import Path
import xml.etree.ElementTree as ET
from xml.dom import minidom

import requests


def download_file(url: str, dest_path: Path) -> None:
    """Given the url of a sitemap file, this function downloads the file into destination
    path (dest_path)

    Parameters
    ----------
    url : str
        The url of the sitemap file to be downloaded
    dest_path : Path
        The destination path to save the downloaded file

    Raises
    ------
    requests.exceptions.Timeout
        Raises this exception when accessing a link takes too long
    requests.exceptions.HTTPError
        Raised when the server answers with an HTTP error status
    """
    # Send the request
    try:
        response = requests.get(url, stream=True, timeout=30)
    except requests.exceptions.Timeout:
        print(f"Timed out while trying to download the sitemap at this url: {url}")
        # BUG FIX: bare ``raise`` re-raises the original Timeout with its
        # traceback; the previous code raised a brand-new, context-free
        # ``requests.exceptions.Timeout`` class instead.
        raise

    # BUG FIX: fail loudly on HTTP errors instead of silently writing an
    # error page to disk as if it were a sitemap.
    response.raise_for_status()

    # Stream the body to disk in chunks so large sitemaps never need to fit
    # in memory at once.
    with open(dest_path, mode='wb') as file:
        for chunk in response.iter_content(chunk_size=8192):
            file.write(chunk)


def extract_urls_and_headers(links_dict: dict) -> tuple:
    """Processes the dictionary of project metadata, confirms existence of a downloadable
    sitemap, returns valid lists of project names and sitemap urls in a tuple

    Parameters
    ----------
    links_dict : dict
        Dictionary containing the metadata of projects; values are the
        documentation urls (``None`` when a project has no stable docs yet)

    Returns
    -------
    tuple
        contains the list of project names and the list of sitemap urls
    """
    valid_project_names = []
    valid_urls = []
    for project_name, url in links_dict.items():
        # Projects without published documentation are skipped.
        if url is None:  # was ``url == None`` — identity check is the idiom
            continue

        # "https://<sub>.docs.pyansys.com/version/stable" ->
        # "https://<sub>.docs.pyansys.com/sitemap.xml"
        sitemap_url = url.split("docs.pyansys.com")[0] + "docs.pyansys.com/sitemap.xml"

        # BUG FIX: probe the sitemap url itself. The previous code probed the
        # docs landing page (``url``), so projects without a sitemap were
        # never filtered out. A timeout is also supplied (was missing).
        if requests.get(sitemap_url, timeout=30).status_code == 404:
            continue

        valid_project_names.append(project_name)
        valid_urls.append(sitemap_url)

    return valid_project_names, valid_urls


def generate_sitemap_index(project_names: list, dest_path: Path) -> None:
    """Generates the global sitemap file which will point to all other sitemaps

    Parameters
    ----------
    project_names : list
        List of project names with a downloadable sitemap file
    dest_path : Path
        The destination path to save the generated sitemap file
    """
    # Create the root element with namespace
    sitemap_index = ET.Element("sitemapindex", xmlns="http://www.sitemaps.org/schemas/sitemap/0.9")

    # Create sitemap elements for each URL
    for project in project_names:
        # Modify the url to point to the correct gh-pages directory
        modified_url = f"https://docs.pyansys.com/sitemap/{project}_sitemap.xml"

        sitemap = ET.SubElement(sitemap_index, "sitemap")
        loc = ET.SubElement(sitemap, "loc")
        loc.text = modified_url

    # Format XML with indentation
    rough_string = ET.tostring(sitemap_index, 'utf-8')
    reparsed = minidom.parseString(rough_string)
    pretty_xml = reparsed.toprettyxml(indent="  ")

    # Create the tree and write to XML file
    with open(dest_path, "w") as f:
        f.write(pretty_xml)


# Run the script
if __name__ == "__main__":
    # Imported lazily so the module stays importable without the project
    # (the top-level ``from links import LINKS`` broke standalone imports).
    from links import LINKS

    # Create path. BUG FIX: ``exist_ok`` avoids FileExistsError on reruns
    # (the previous bare ``mkdir()`` crashed the second time the script ran).
    folder_path = Path('.') / 'sitemaps'
    folder_path.mkdir(parents=True, exist_ok=True)

    # Get actual valid URLS and corresponding project names
    project_names, project_urls = extract_urls_and_headers(LINKS)

    # Generate global sitemap file
    file_path = folder_path / "globalsitemap.xml"
    generate_sitemap_index(project_names, file_path)

    # Download every per-project sitemap next to the global one.
    for index, url in enumerate(project_urls):
        file_path = folder_path / (project_names[index] + '_sitemap.xml')
        download_file(url, file_path)
"""
Script for automatic replacement of released links.

Usage is very simple. Just run the script.

.. code:: python

    python links.py
"""

import os
import re

ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
"""Root directory of the project relative to this file."""

PYPROJECT_TOML_FILE = os.path.join(ROOT_DIR, "pyproject.toml")
"""Path to pyproject.toml file."""

DOCS_DIRECTORY = os.path.join(ROOT_DIR, "doc", "source")
"""Path to the documentation source directory"""

LINKS = {
    "ansys-acp-core": None,  # Once stable release is out "https://acp.docs.pyansys.com/version/stable",  # noqa: E501
    "ansys-additive-core": "https://additive.docs.pyansys.com/version/stable",
    "ansys-additive-widgets": "https://widgets.additive.docs.pyansys.com/version/stable",
    "ansys-conceptev-core": "https://conceptev.docs.pyansys.com/version/stable",
    "ansys-dpf-composites": "https://composites.dpf.docs.pyansys.com/version/stable",  # noqa: E501
    "ansys-dpf-core": "https://dpf.docs.pyansys.com/version/stable",
    "ansys-dpf-post": "https://post.docs.pyansys.com/version/stable",
    "ansys-dpf-gate": None,
    "ansys-dyna-core": "https://dyna.docs.pyansys.com/version/stable",
    "ansys-dynamicreporting-core": "https://dynamicreporting.docs.pyansys.com/version/stable",  # noqa: E501
    "ansys-edb-core": "https://edb.core.docs.pyansys.com/version/stable",
    "ansys-geometry-core": "https://geometry.docs.pyansys.com/version/stable",
    "ansys-fluent-core": "https://fluent.docs.pyansys.com/version/stable",
    "ansys-hps-client": "https://hps.docs.pyansys.com/version/stable",
    "ansys-mapdl-core": "https://mapdl.docs.pyansys.com/version/stable",
    "ansys-math-core": "https://math.docs.pyansys.com/version/stable",
    "ansys-sound-core": "https://sound.docs.pyansys.com/version/stable",
    "ansys-mechanical-core": "https://mechanical.docs.pyansys.com/version/stable",  # noqa: E501
    "ansys-meshing-prime": "https://prime.docs.pyansys.com/version/stable",
    "ansys-modelcenter-workflow": "https://modelcenter.docs.pyansys.com/version/stable",  # noqa: E501
    "ansys-motorcad-core": "https://motorcad.docs.pyansys.com/version/stable",
    "ansys-openapi-common": None,
    "ansys-optislang-core": "https://optislang.docs.pyansys.com/version/stable",
    "ansys-platform-instancemanagement": "https://pypim.docs.pyansys.com/version/stable",  # noqa: E501
    "ansys-pyensight-core": "https://ensight.docs.pyansys.com/version/stable",
    "ansys-rocky-core": "https://rocky.docs.pyansys.com/version/stable",
    "ansys-seascape": "https://seascape.docs.pyansys.com/version/stable",
    "ansys-sherlock-core": "https://sherlock.docs.pyansys.com/version/stable",
    "ansys-simai-core": "https://simai.docs.pyansys.com/version/stable",
    "ansys-systemcoupling-core": "https://systemcoupling.docs.pyansys.com/version/stable",  # noqa: E501
    "ansys-turbogrid-core": "https://turbogrid.docs.pyansys.com/version/stable",
    "ansys-workbench-core": "https://workbench.docs.pyansys.com/version/stable",
    "pyedb": "https://edb.docs.pyansys.com/version/stable",
    "pyaedt": "https://aedt.docs.pyansys.com/version/stable",
    "pygranta": "https://grantami.docs.pyansys.com/version/stable",
    "pytwin": "https://twin.docs.pyansys.com/version/stable",
    # MAPDL - ALL
    "ansys-mapdl-reader": "https://reader.docs.pyansys.com/version/stable",
    # FLUENT - ALL
    "ansys-fluent-visualization": "https://visualization.fluent.docs.pyansys.com/version/stable",  # noqa: E501
    # TOOLS
    "ansys-materials-manager": "https://manager.materials.docs.pyansys.com/version/stable",
    "ansys-tools-filetransfer": "https://filetransfer.tools.docs.pyansys.com/version/stable",  # noqa: E501
    "ansys-tools-local-product-launcher": "https://local-product-launcher.tools.docs.pyansys.com/version/stable",  # noqa: E501
    "ansys-tools-path": "https://path.tools.docs.pyansys.com/version/stable",
    "ansys-tools-protoc-helper": None,
    "ansys-tools-visualization-interface": "https://visualization-interface.tools.docs.pyansys.com/version/stable",  # noqa: E501
    "ansys-units": "https://units.docs.pyansys.com/version/stable",
    "pyansys-tools-report": "https://report.tools.docs.pyansys.com/version/stable",  # noqa: E501
    "pyansys-tools-versioning": "https://versioning.tools.docs.pyansys.com/version/stable",  # noqa: E501
    "pyansys-tools-variableinterop": "https://variableinterop.docs.pyansys.com/version/stable",  # noqa: E501
}
"""Dictionary with PyAnsys packages and their multi-version docs site."""


def retrieve_major_minor(package: str):
    """Extract the major and minor version of a pinned package.

    Notes
    -----
    This function navigates to the package's pyproject.toml file
    and processes it to retrieve the desired major, minor version.

    Parameters
    ----------
    package : str
        The package to be searched for.

    Returns
    -------
    tuple of (int, int)
        The major and minor versions of the package, or ``(None, None)``
        when the package is not pinned in pyproject.toml.
    """
    # Explicit encoding so the parse does not depend on the platform default.
    with open(PYPROJECT_TOML_FILE, "r", encoding="utf-8") as file:
        content = file.read()
        # Match e.g. "ansys-mapdl-core==0.68" and capture major/minor.
        pattern = r"\b" + re.escape(package) + r"==(\d+)\.(\d+)"
        match = re.search(pattern, content)

    if match:
        major_version = int(match.group(1))
        minor_version = int(match.group(2))
        return (major_version, minor_version)

    return (None, None)


def search_and_replace(link: str, new_link: str):
    """Replace existing link with the newly provided link.

    Parameters
    ----------
    link : str
        The link to be searched for.
    new_link : str
        The link to replace the existing one.
    """
    # Traverse the docs directory
    for root, _, files in os.walk(DOCS_DIRECTORY):
        # Skip the _static subdirectory
        if "_static" in root.split(os.sep):
            continue

        # Process the files
        for file in files:
            file_path = os.path.join(root, file)
            # Explicit encoding on read AND write: docs files are UTF-8 and
            # the platform default could silently corrupt them.
            with open(file_path, "r", encoding="utf-8") as f:
                content = f.read()

            # Search for the link in the content, replace and save
            if link in content:
                new_content = content.replace(link, new_link)

                with open(file_path, "w", encoding="utf-8") as f:
                    f.write(new_content)

                print(f"Replaced '{link}' with '{new_link}' in file: {file_path}")  # noqa: E501


def released_docs():
    """Update links for released documentation.

    Notes
    -----
    Links are expected to point to a certain version when released
    inside the metapackage, and not to the latest stable version. This
    module takes care of updating the links automatically. The script has to
    be run locally and the changes have to be committed to the release branch
    prior to releasing.
    """
    # Loop over all the above defined packages
    for key, value in LINKS.items():
        # Packages that are not adapted to multi-version have a None
        # link associated as its value... skip them.
        if value is None:
            continue

        # Retrieve the major and minor versions of the package
        major, minor = retrieve_major_minor(key)

        # ``or`` instead of ``and``: a single missing component is enough to
        # make the "X.Y" link unusable (in practice both are None together).
        if major is None or minor is None:
            # No match found for the link... throw message
            print(f"Error retrieving minor/major version of {key}... Skipping.")  # noqa: E501
            continue

        # Define the new link
        link_root = value.split("/version/")[0]
        new_link = f"{link_root}/version/{major}.{minor}"

        # Search and replace through all our docs links
        search_and_replace(value, new_link)


if __name__ == "__main__":
    released_docs()
"""Script for automatically creating a milestone in repositories.

Only specified repositories (via environment variable) will be handled.

The purpose of this code is to connect to a specific repository
and create a milestone. This milestone will be associated to a certain date
provided as an input argument.

"""

import datetime
import os

import github

# Insert your credentials... It should be a PAT. None by default
TOKEN = None

# Provide the repository you want to create a milestone in... None by default
REPOSITORY = None

# Provide the release date to be considered... None by default
# If you provide manual input it should be using:
#
#   RELEASE_DATE = datetime.datetime.strptime(date_str, "%Y/%m/%d")
#
# where "date_str" must be a string date of format YYYY/MM/DD
RELEASE_DATE = None

# =============================================================================
# MODIFY WITH CAUTION FROM THIS POINT ONWARDS
# =============================================================================

# When the constants above are left as None, this script is running from the
# "Create milestones for Ansys Release" GitHub action: fall back to the
# environment variables the action exports.
if TOKEN is None:
    print("Reading access token from 'TOKEN' environment variable...")
    TOKEN = os.environ.get("TOKEN", default=None)

if REPOSITORY is None:
    print("Reading target repo from 'REPOSITORY' environment variable...")
    REPOSITORY = os.environ.get("REPOSITORY", default=None)

if RELEASE_DATE is None:
    print("Reading target repo from 'RELEASE_DATE' environment variable...")
    env_var = os.environ.get("RELEASE_DATE", default=None)
    if env_var is not None:
        try:
            RELEASE_DATE = datetime.datetime.strptime(env_var, "%Y/%m/%d")
        except (TypeError, ValueError, IndexError, KeyError) as err:
            # BUG FIX: chain the original exception (``from err``) so the
            # root cause of the parse failure is preserved in the traceback.
            raise RuntimeError(
                """Problem parsing input date. It should be a string in format YYYY/MM/DD"""  # noqa: E501
            ) from err


# If the value for TOKEN, REPOSITORY, RELEASE_DATE is None... throw error!
if TOKEN is None:
    raise ValueError("No TOKEN value available. Consider adding it.")
elif REPOSITORY is None:
    raise ValueError("No REPOSITORY value available. Consider adding it.")
elif RELEASE_DATE is None:
    raise ValueError("No RELEASE_DATE value available. Consider adding it.")

# Create a connection to GitHub
g = github.Github(TOKEN)

# Get the repository we want to create the milestone at
repo = g.get_repo(REPOSITORY)

# Get its last release - assuming semantic versioning (i.e. v0.1.0)
major, minor, *_ = repo.get_latest_release().tag_name.replace("v", "").split(".")  # noqa: E501
next_release = f"v{major}.{int(minor)+1}.0"

# Get its available milestones
milestones = repo.get_milestones(state="all")

# Check if there is already any milestone whose name matches "next_release"
is_created = any(next_release in milestone.title for milestone in milestones)

# If the milestone hasn't been created yet... go ahead!
if not is_created:
    # Milestone information
    desc = f"""This repository is part of an Ansys Release (unified install).

Thus, it is necessary to create a release for the next Dev Complete date.
Please consider releasing by {RELEASE_DATE.strftime("%Y/%m/%d")}.

If your current release is the one expected to be used in the upcoming
official Ansys Release, please close and delete this milestone."""

    # Create a new milestone; due half a day after the release date so the
    # milestone does not show as overdue on the release day itself.
    repo.create_milestone(
        title=next_release,
        state="open",
        description=desc,
        due_on=RELEASE_DATE + datetime.timedelta(hours=12),
    )

    print(f"Milestone was created at {REPOSITORY} with name {next_release}!")
else:
    # Typo fix: was ``# noqa: 501`` (missing the "E"), which suppresses nothing.
    print(f"Milestone was already available at {REPOSITORY}... Skipping creation!")  # noqa: E501