diff --git a/.wordlist.txt b/.wordlist.txt index d59df3d82..d3f3044ee 100644 --- a/.wordlist.txt +++ b/.wordlist.txt @@ -3063,4 +3063,28 @@ libs petclinic signup ut -warmup \ No newline at end of file +warmup +AOM +ARNs +AdministratorAccess +AmazonS +Arial +AwsServerlessDynamoDbLambda +AwsServerlessLambda +BatchWriteCommand +BatchWriteItem +CORS +CommonJS +FHD +GetItem +IoTPage +PutItem +Renderer +Shen +UpdateItem +WebM +WebsiteHosting +flexbox +getAverageTemperature +libaom +writeTemperatures \ No newline at end of file diff --git a/content/install-guides/aperf.md b/content/install-guides/aperf.md index 0ee8687bf..974cbd17a 100644 --- a/content/install-guides/aperf.md +++ b/content/install-guides/aperf.md @@ -69,7 +69,7 @@ source ~/.bashrc Alternatively, you can copy the `aperf` executable to a directory already in your search path. ```bash { target="ubuntu:latest" } -sudo cp aperf-v0.1.9-alpha-aarch64/aperf /usr/local/bin +sudo cp aperf-v0.1.12-alpha-aarch64/aperf /usr/local/bin ``` Confirm `aperf` is installed by printing the version: diff --git a/content/learning-paths/cross-platform/_example-learning-path/_index.md b/content/learning-paths/cross-platform/_example-learning-path/_index.md index 13ce1b5ad..fdc8a2090 100644 --- a/content/learning-paths/cross-platform/_example-learning-path/_index.md +++ b/content/learning-paths/cross-platform/_example-learning-path/_index.md @@ -25,6 +25,8 @@ armips: - All operatingsystems: tools_software_languages: + - Hugo + ### FIXED, DO NOT MODIFY # ================================================================================ diff --git a/content/learning-paths/cross-platform/gitlab/_index.md b/content/learning-paths/cross-platform/gitlab/_index.md index 68a3cd7c6..2ce96256e 100644 --- a/content/learning-paths/cross-platform/gitlab/_index.md +++ b/content/learning-paths/cross-platform/gitlab/_index.md @@ -1,6 +1,9 @@ --- title: Build a CI/CD pipeline with GitLab on Google Axion draft: true +cascade: + draft: true + minutes_to_complete: 30 who_is_this_for: This is an advanced topic for DevOps professionals who are looking to build a CI/CD pipeline with GitLab on Google Axion based self-hosted GitLab runners. diff --git a/content/learning-paths/cross-platform/psa-tfm/_index.md b/content/learning-paths/cross-platform/psa-tfm/_index.md index c37f0a183..449472d84 100644 --- a/content/learning-paths/cross-platform/psa-tfm/_index.md +++ b/content/learning-paths/cross-platform/psa-tfm/_index.md @@ -1,6 +1,10 @@ --- title: Build and run Arm Trusted Firmware examples on Corstone-1000 +draft: true +cascade: + draft: true + minutes_to_complete: 120 who_is_this_for: This an introductory topic is for software developers new to Platform Security Architecture (PSA) and Arm Trusted Firmware components diff --git a/content/learning-paths/cross-platform/psa-tfm/sdm.md b/content/learning-paths/cross-platform/psa-tfm/sdm.md index 478e7e49c..44f902041 100644 --- a/content/learning-paths/cross-platform/psa-tfm/sdm.md +++ b/content/learning-paths/cross-platform/psa-tfm/sdm.md @@ -1,7 +1,6 @@ --- # User change title: Demonstrate Authenticated Debug -draft: true weight: 5 # 1 is first, 2 is second, etc. 
diff --git a/content/learning-paths/cross-platform/sme/_index.md b/content/learning-paths/cross-platform/sme/_index.md index f378a58ab..b0816f5c4 100644 --- a/content/learning-paths/cross-platform/sme/_index.md +++ b/content/learning-paths/cross-platform/sme/_index.md @@ -2,6 +2,8 @@ title: Get started with the Scalable Matrix Extension (SME) draft: true +cascade: + draft: true minutes_to_complete: 20 diff --git a/content/learning-paths/laptops-and-desktops/win_aws_iot_s3/Figures/01.png b/content/learning-paths/laptops-and-desktops/win_aws_iot_s3/Figures/01.png new file mode 100644 index 000000000..5df242264 Binary files /dev/null and b/content/learning-paths/laptops-and-desktops/win_aws_iot_s3/Figures/01.png differ diff --git a/content/learning-paths/laptops-and-desktops/win_aws_iot_s3/Figures/02.png b/content/learning-paths/laptops-and-desktops/win_aws_iot_s3/Figures/02.png new file mode 100644 index 000000000..31a6f4964 Binary files /dev/null and b/content/learning-paths/laptops-and-desktops/win_aws_iot_s3/Figures/02.png differ diff --git a/content/learning-paths/laptops-and-desktops/win_aws_iot_s3/Figures/03.png b/content/learning-paths/laptops-and-desktops/win_aws_iot_s3/Figures/03.png new file mode 100644 index 000000000..a5fb521bd Binary files /dev/null and b/content/learning-paths/laptops-and-desktops/win_aws_iot_s3/Figures/03.png differ diff --git a/content/learning-paths/laptops-and-desktops/win_aws_iot_s3/Figures/04.png b/content/learning-paths/laptops-and-desktops/win_aws_iot_s3/Figures/04.png new file mode 100644 index 000000000..fb42c9d86 Binary files /dev/null and b/content/learning-paths/laptops-and-desktops/win_aws_iot_s3/Figures/04.png differ diff --git a/content/learning-paths/laptops-and-desktops/win_aws_iot_s3/Figures/05.png b/content/learning-paths/laptops-and-desktops/win_aws_iot_s3/Figures/05.png new file mode 100644 index 000000000..38b3ef736 Binary files /dev/null and b/content/learning-paths/laptops-and-desktops/win_aws_iot_s3/Figures/05.png differ diff --git a/content/learning-paths/laptops-and-desktops/win_aws_iot_s3/_index.md b/content/learning-paths/laptops-and-desktops/win_aws_iot_s3/_index.md new file mode 100644 index 000000000..39c7e5429 --- /dev/null +++ b/content/learning-paths/laptops-and-desktops/win_aws_iot_s3/_index.md @@ -0,0 +1,35 @@ +--- +title: Use Amazon S3 for your IoT applications running Windows on Arm + +minutes_to_complete: 30 + +who_is_this_for: This is an advanced topic for developers who are interested in using Amazon Web Services (AWS) S3 for hosting their IoT websites. + +learning_objectives: + - Gain familiarity with Amazon S3. + - Create a static website that interacts with AWS Lambda. + +prerequisites: + - A Windows on Arm computer such as [Windows Dev Kit 2023](https://learn.microsoft.com/en-us/windows/arm/dev-kit), a Lenovo Thinkpad X13s running Windows 11 or a Windows on Arm [virtual machine](/learning-paths/cross-platform/woa_azure/). + - Any code editor. [Visual Studio Code for Arm64](https://code.visualstudio.com/docs/?dv=win32arm64user) is suitable. + - Completion of the [Use AWS Lambda for IoT applications](/learning-paths/laptops-and-desktops/win_aws_iot_lambda/) Learning Path. 
+ +author_primary: Dawid Borycki + +### Tags +skilllevels: Advanced +subjects: Migration to Arm +armips: + - Cortex-A +operatingsystems: + - Windows +tools_software_languages: + - Node.js + - Visual Studio Code + +### FIXED, DO NOT MODIFY +# ================================================================================ +weight: 1 # _index.md always has weight of 1 to order correctly +layout: "learningpathall" # All files under learning paths have this same wrapper +learning_path_main_page: "yes" # This should be surfaced when looking for related content. Only set for _index.md of learning path content. +--- diff --git a/content/learning-paths/laptops-and-desktops/win_aws_iot_s3/_next-steps.md b/content/learning-paths/laptops-and-desktops/win_aws_iot_s3/_next-steps.md new file mode 100644 index 000000000..adc8aadb7 --- /dev/null +++ b/content/learning-paths/laptops-and-desktops/win_aws_iot_s3/_next-steps.md @@ -0,0 +1,39 @@ +--- +# ================================================================================ +# Edit +# ================================================================================ + +next_step_guidance: > + You have learned how to use Amazon S3 for an IoT solution. You can now learn how to develop IoT applications with .NET 8 on Windows on Arm. +# 1-3 sentence recommendation outlining how the reader can generally keep learning about these topics, and a specific explanation of why the next step is being recommended. + +recommended_path: "/learning-paths/laptops-and-desktops/win_net8" +# Link to the next learning path being recommended(For example this could be /learning-paths/servers-and-cloud-computing/mongodb). + + +# further_reading links to references related to this path. Can be: + # Manuals for a tool / software mentioned (type: documentation) + # Blog about related topics (type: blog) + # General online references (type: website) + +further_reading: + - resource: + title: Amazon S3 + link: https://aws.amazon.com/s3/ + type: documentation + - resource: + title: Hosting a static website using Amazon S3 + link: https://docs.aws.amazon.com/AmazonS3/latest/userguide/WebsiteHosting.html + type: documentation + - resource: + title: Developing with Amazon S3 + link: https://docs.aws.amazon.com/AmazonS3/latest/userguide/developing-s3.html + type: documentation + +# ================================================================================ +# FIXED, DO NOT MODIFY +# ================================================================================ +weight: 21 # set to always be larger than the content in this path, and one more than 'review' +title: "Next Steps" # Always the same +layout: "learningpathall" # All files under learning paths have this same wrapper +--- diff --git a/content/learning-paths/laptops-and-desktops/win_aws_iot_s3/_review.md b/content/learning-paths/laptops-and-desktops/win_aws_iot_s3/_review.md new file mode 100644 index 000000000..7ba6a66e5 --- /dev/null +++ b/content/learning-paths/laptops-and-desktops/win_aws_iot_s3/_review.md @@ -0,0 +1,51 @@ +--- +# ================================================================================ +# Edit +# ================================================================================ + +# Always 3 questions. Should try to test the reader's knowledge, and reinforce the key points you want them to remember. + # question: A one sentence question + # answers: The correct answers (from 2-4 answer options only). Should be surrounded by quotes. 
+    # correct_answer: An integer indicating what answer is correct (index starts from 1)
+    # explanation: A short (1-3 sentence) explanation of why the correct answer is correct. Can add additional context if desired
+
+
+review:
+    - questions:
+        question: >
+            What is Amazon S3?
+        answers:
+            - "A part of the AWS IoT Core to process and route data between IoT devices and other AWS services."
+            - "A scalable, high-speed, web-based cloud storage service designed for online backup and archiving of data."
+            - "An engine for accelerating database access."
+        correct_answer: 2
+        explanation: >
+            Amazon S3 (Simple Storage Service) is a scalable, high-speed, web-based cloud storage service designed for online backup and archiving of data and applications on Amazon Web Services (AWS).
+
+    - questions:
+        question: >
+            Does Amazon S3 require you to set up an archive server?
+        answers:
+            - "No"
+            - "Yes"
+        correct_answer: 1
+        explanation: >
+            Amazon S3 is a managed service, so you don't have to worry about hardware provisioning, setup and configuration, replication, software patching, or cluster scaling.
+
+    - questions:
+        question: >
+            Can you use Amazon S3 for static website hosting?
+        answers:
+            - "Yes"
+            - "No"
+        correct_answer: 1
+        explanation: >
+            Amazon S3 can also be used for static website hosting. This feature allows users to host static web pages directly from an S3 bucket, making it a cost-effective and simple solution for serving static content such as HTML, CSS, JavaScript, and images.
+
+# ================================================================================
+# FIXED, DO NOT MODIFY
+# ================================================================================
+title: "Review"                # Always the same title
+weight: 20                     # Set to always be larger than the content in this path
+layout: "learningpathall"      # All files under learning paths have this same wrapper
+---
diff --git a/content/learning-paths/laptops-and-desktops/win_aws_iot_s3/add-lambda.md b/content/learning-paths/laptops-and-desktops/win_aws_iot_s3/add-lambda.md
new file mode 100644
index 000000000..2e89fef45
--- /dev/null
+++ b/content/learning-paths/laptops-and-desktops/win_aws_iot_s3/add-lambda.md
@@ -0,0 +1,46 @@
+---
+# User change
+title: "Add AWS Lambda Endpoint"
+
+weight: 4
+
+layout: "learningpathall"
+---
+
+### AWS Lambda
+
+You will now use the AWS Lambda console to retrieve the AWS Lambda endpoint for the static website. Before you begin, make sure to prepare the `GetAverageTemperature` AWS Lambda function as explained in this [Learning Path](/learning-paths/laptops-and-desktops/win_aws_iot_lambda_dynamodb/). Then proceed as follows:
+1. Go to the AWS Lambda console, and click the `GetAverageTemperature` Lambda function.
+2. In the Lambda function dashboard, click the **Configuration** tab and then **Function URL**, as shown below:
+
+![fig2](Figures/02.png)
+
+3. Under the Function URL, click the **Create Function URL** button.
+4. In the window that appears, select **NONE**, scroll down to **Additional Settings**, and check **Configure cross-origin resource sharing (CORS)**.
+5. Click the **Save** button.
+
+The function URL will appear as follows:
+
+![fig3](Figures/03.png)
+
+Copy the link, and use it to replace the `YOUR_API_GATEWAY_ENDPOINT_URL` placeholder in the **index.js** file as follows:
+
+```JavaScript
+document.getElementById('fetchTemperatureButton').addEventListener('click', function() {
+    fetch('YOUR_API_GATEWAY_ENDPOINT_URL')
+        .then(response => response.json())
+        .then(data => {
+            const temperature = data.average.toFixed(2);
+            document.getElementById('temperatureDisplay').innerText = `Average Temperature: ${temperature} °C`;
+        })
+        .catch(error => {
+            console.error('Error fetching temperature:', error);
+            document.getElementById('temperatureDisplay').innerText = 'Error fetching temperature';
+        });
+});
+```
+
+Save the file, and open **index.html**. Then, click the **Get Temperature** button, and the average temperature will appear as shown below:
+
+![fig4](Figures/04.png)
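+Before wiring the endpoint into the page, you can check that it responds from a terminal. The URL below is only an illustrative placeholder; substitute the Function URL you copied above:
+
+```console
+# Hypothetical Function URL - replace with the one you copied
+curl https://abcdefgh12345678.lambda-url.eu-central-1.on.aws/
+```
+
+If the Lambda function and DynamoDB table from the previous Learning Path are in place, the response should be a small JSON object containing the `average` field that **index.js** reads.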
diff --git a/content/learning-paths/laptops-and-desktops/win_aws_iot_s3/background.md b/content/learning-paths/laptops-and-desktops/win_aws_iot_s3/background.md
new file mode 100644
index 000000000..abc99518d
--- /dev/null
+++ b/content/learning-paths/laptops-and-desktops/win_aws_iot_s3/background.md
@@ -0,0 +1,17 @@
+---
+# User change
+title: "Background"
+
+weight: 2
+
+layout: "learningpathall"
+---
+### What is Amazon S3?
+
+Amazon Simple Storage Service (S3) is a scalable, high-speed, web-based cloud storage service designed for online backup and archiving of data and applications on Amazon Web Services (AWS). It provides developers and IT teams with secure, durable, and highly scalable object storage. It also offers a simple web service interface that can be used to store and retrieve any amount of data from anywhere on the web, making it ideal for data storage, distribution, and computation tasks. It supports a range of use cases, including big data analytics, content distribution, disaster recovery, and serverless computing. With features like lifecycle management, versioning, and access controls, Amazon S3 helps organizations manage data at scale while maintaining security and compliance.
+
+In addition to its core storage capabilities, Amazon S3 can also be used for static website hosting. This feature allows users to host static web pages directly from an S3 bucket, making it a cost-effective and simple solution for serving content such as HTML, CSS, JavaScript, and images. By configuring the bucket for website hosting, users can define index and error documents, and take advantage of S3’s high availability and scalability to ensure their website is accessible and performant. This makes Amazon S3 an excellent choice for personal blogs, company websites, and landing pages that do not require server-side scripting.
+
+Amazon S3 provides Software Development Kits (SDKs) that simplify the integration of S3 into applications by providing comprehensive APIs that facilitate file uploads, downloads, and management directly from the codebase. The AWS CLI allows developers and administrators to interact with S3 from the command line, accelerating many programming and administrative tasks.
+
+In this Learning Path, you will learn how to use Amazon S3 to host a static website that interacts with AWS Lambda. Specifically, the Lambda function will consume data from a DynamoDB table, which is populated by a hypothetical IoT device streaming data to the cloud as explained in this [Learning Path](/learning-paths/laptops-and-desktops/win_aws_iot_lambda/). This setup not only demonstrates the seamless connectivity between various AWS services but also serves as a foundation for building an efficient dashboard for IoT solutions, providing real-time insights and data visualization.
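+To give a flavor of the CLI workflow used later in this Learning Path, listing your buckets and copying a file into one are single commands. The bucket name here is a placeholder:
+
+```console
+# List your S3 buckets, then copy a local file into a (placeholder) bucket
+aws s3 ls
+aws s3 cp report.csv s3://my-iot-bucket/
+```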
diff --git a/content/learning-paths/laptops-and-desktops/win_aws_iot_s3/deploy.md b/content/learning-paths/laptops-and-desktops/win_aws_iot_s3/deploy.md
new file mode 100644
index 000000000..17df6ef5b
--- /dev/null
+++ b/content/learning-paths/laptops-and-desktops/win_aws_iot_s3/deploy.md
@@ -0,0 +1,93 @@
+---
+# User change
+title: "Deploy website to Amazon S3"
+
+weight: 5
+
+layout: "learningpathall"
+---
+### Deploy to Amazon S3
+
+In this section you will deploy the website to Amazon S3 using the AWS Command Line Interface (AWS CLI) version 2. If you don't already have it, start by [installing AWS CLI](/install-guides/aws-cli/).
+### AWS CLI
+To configure the AWS CLI, you first need to create the [AWS CLI user](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_create.html#id_users_create_console). Then, you need to generate the access keys by following this [tutorial](https://docs.aws.amazon.com/cli/v1/userguide/cli-authentication-user.html).
+
+Once you have the access keys, go to the Command Prompt and type the following:
+
+```console
+aws configure
+```
+
+The command will prompt for the following:
+1. AWS Access Key ID [None]: **paste in your Access Key ID**
+2. AWS Secret Access Key [None]: **paste in your Secret Access Key**
+3. Default region name [None]: **type your region**
+4. Default output format [None]: **press enter**
+
+Ensure that the AWS CLI can communicate with AWS. To do so, type the following command (replace `eu-central-1` with the region code you are using):
+```console
+aws lambda list-functions --region eu-central-1 --output table
+```
+
+The command will display the AWS Lambda functions in a table.
+
+### S3 bucket
+You are now ready to deploy the website to AWS S3. To do this, you will first create the S3 bucket. Then, you will upload the website files, and finally you will configure the S3 bucket for static website hosting.
+
+Proceed as follows:
+1. Create the bucket, replacing the `<bucket-name>` placeholder with a unique bucket name and `<region>` with the AWS region code you're using, as shown below:
+```console
+aws s3api create-bucket --bucket <bucket-name> --region <region> --create-bucket-configuration LocationConstraint=<region> --object-ownership BucketOwnerPreferred
+```
+
+2. Upload the following files:
+```console
+aws s3 cp index.html s3://<bucket-name>/
+aws s3 cp styles.css s3://<bucket-name>/
+aws s3 cp index.js s3://<bucket-name>/
+```
+
+3. Configure the bucket policy to enable public read of the files:
+```console
+cat > policy.json << EOL
+{
+    "Version": "2012-10-17",
+    "Statement": [
+        {
+            "Sid": "PublicReadGetObject",
+            "Effect": "Allow",
+            "Principal": "*",
+            "Action": "s3:GetObject",
+            "Resource": "arn:aws:s3:::<bucket-name>/*"
+        }
+    ]
+}
+EOL
+```
+
+4. Remove the public access block on the bucket:
+```console
+aws s3api delete-public-access-block --bucket <bucket-name>
+```
+
+5. Apply the policy as follows:
+```console
+aws s3api put-bucket-policy --bucket <bucket-name> --policy file://policy.json
+```
+
+6. Enable static website hosting:
+```console
+aws s3 website s3://<bucket-name>/ --index-document index.html
+```
+
+Finally, access the website by typing **http://<bucket-name>.s3-website-<region>.amazonaws.com**, where `<bucket-name>` is the name of your bucket and `<region>` stands for the region you're using (here that is eu-central-1):
+
+![fig5](Figures/05.png)
+
+## Summary
+In this Learning Path you have learned how to create a simple static website that fetches and displays temperature data from an AWS Lambda function. The website consists of an HTML file (**index.html**), a CSS file (**styles.css**), and a JavaScript file (**index.js**). The JavaScript code within the website sends an HTTP request to an AWS Lambda function, which processes the request and returns the temperature data in JSON format. This data is then displayed on the webpage.
+
+After developing the website, you deployed it to Amazon S3 for static website hosting. You created an S3 bucket, disabled the default block public access settings, and uploaded the website files to the bucket. You then applied a bucket policy to allow public read access to the objects, and configured the bucket to serve as a static website. Finally, you accessed the website using the S3 static website endpoint, successfully integrating the web application with AWS Lambda and deploying it to the cloud.
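+As a quick smoke test, you can also fetch the site from the command line instead of a browser. The bucket name and region below are placeholders; substitute your own values:
+
+```console
+# Placeholder bucket name and region - replace with your own
+curl -I http://my-iot-bucket.s3-website-eu-central-1.amazonaws.com
+```
+
+A `200 OK` status with a `Content-Type: text/html` header confirms that static website hosting is serving **index.html**.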
diff --git a/content/learning-paths/laptops-and-desktops/win_aws_iot_s3/static-website.md b/content/learning-paths/laptops-and-desktops/win_aws_iot_s3/static-website.md
new file mode 100644
index 000000000..3016309f4
--- /dev/null
+++ b/content/learning-paths/laptops-and-desktops/win_aws_iot_s3/static-website.md
@@ -0,0 +1,144 @@
+---
+# User change
+title: "Static website"
+
+weight: 3
+
+layout: "learningpathall"
+---
+
+### Create a static website
+
+Start by creating the static website. To do this, create a new folder, for example, named **IoTPage**. Within the **IoTPage** folder, create three further (and essential) files:
+* index.html - contains the structure and content of your webpage
+* styles.css - defines the styling and layout
+* index.js - handles the interactive functionalities, such as fetching data from AWS Lambda and displaying it on your site
+
+These files will serve as the backbone of your static website.
+
+Open **index.html** and modify it as follows:
+
+```html
+<!DOCTYPE html>
+<html lang="en">
+<head>
+    <meta charset="UTF-8">
+    <meta name="viewport" content="width=device-width, initial-scale=1.0">
+    <title>IoTPage</title>
+    <link rel="stylesheet" href="styles.css">
+</head>
+<body>
+    <div class="container">
+        <h1>Temperature</h1>
+        <button id="fetchTemperatureButton">Get Temperature</button>
+        <div id="temperatureDisplay" class="display-box">Temperature will be displayed here</div>
+    </div>
+    <script src="index.js"></script>
+</body>
+</html>
+```
+
+The above declarations link to an external CSS file (styles.css) which styles the webpage. The body of the website contains a heading called “Temperature”, a “Get Temperature” button which, when clicked, will fetch the temperature using the AWS Lambda function, and a display box where the fetched temperature will be shown.
+
+Finally, the declarations link to an external JavaScript file (index.js) which will handle the button click and fetch the temperature data.
+
+Next, open the **styles.css** file and modify it as follows:
+```css
+body {
+    font-family: Arial, sans-serif;
+    display: flex;
+    justify-content: center;
+    align-items: center;
+    height: 100vh;
+    background-color: #f0f0f0;
+    margin: 0;
+}
+
+.container {
+    text-align: center;
+    background: white;
+    padding: 20px;
+    border-radius: 8px;
+    box-shadow: 0 4px 8px rgba(0,0,0,0.1);
+}
+
+button {
+    background-color: #4CAF50;
+    color: white;
+    border: none;
+    padding: 10px 20px;
+    text-align: center;
+    text-decoration: none;
+    display: inline-block;
+    font-size: 16px;
+    margin: 10px 0;
+    cursor: pointer;
+    border-radius: 5px;
+}
+
+button:hover {
+    background-color: #45a049;
+}
+
+.display-box {
+    margin-top: 20px;
+    padding: 15px;
+    border: 1px solid #ddd;
+    border-radius: 4px;
+    background-color: #fafafa;
+}
+```
+
+The first part of the declaration relates to the body of the document. It sets the font to Arial or a sans-serif alternative. You use flexbox to center content both vertically and horizontally. The height is set to 100% of the viewport height. Also, the declarations will apply a light gray background color, and remove the default margin.
+
+Then, there is a declaration of the **.container** class. It does the following:
+* centers the text within the container.
+* sets a white background.
+* adds padding inside the container.
+* rounds the corners with a border radius.
+* adds a subtle shadow for a 3D effect.
+
+Next, you have a declaration of the **button** style, which does the following:
+* sets a green background color.
+* changes the text color to white.
+* removes the default border.
+* adds padding for spacing inside the button.
+* centers the text and removes text decoration.
+* ensures buttons are displayed as inline blocks.
+* sets a font size.
+* adds a margin for spacing around the button.
+* changes the cursor to a pointer on hover.
+* rounds the button corners.
+
+The CSS file also styles the button element when it's hovered over by darkening the green background color.
+
+Finally, you have the **.display-box** class, which will do the following:
+* adds a top margin for spacing.
+* adds padding inside the display box.
+* sets a light gray border.
+* rounds the corners.
+* sets a very light gray background color.
+
+This CSS file provides a clean, centered, and modern look for the webpage with distinct styling for buttons and display boxes, enhancing the user experience.
+ +After declaring the CSS you will implement the **index.js** file as follows: +```JavaScript +document.getElementById('fetchTemperatureButton').addEventListener('click', function() { + fetch('YOUR_API_GATEWAY_ENDPOINT_URL') + .then(response => response.json()) + .then(data => { + const temperature = data.average.toFixed(2); + document.getElementById('temperatureDisplay').innerText = `Average Temperature: ${temperature} °C`; + }) + .catch(error => { + console.error('Error fetching temperature:', error); + document.getElementById('temperatureDisplay').innerText = 'Error fetching temperature'; + }); +}); +``` + +After saving all the files, open the **index.html** file in a web browser. It should render as follows: + +![fig1](Figures/01.png) + +The website is now ready. However, to fetch temperatures, you will need to add the Lambda endpoint. You will configure this in the next section. diff --git a/content/learning-paths/microcontrollers/keilstudiocloud/2build.md b/content/learning-paths/microcontrollers/keilstudiocloud/2build.md index bd4f5672e..2feedd810 100644 --- a/content/learning-paths/microcontrollers/keilstudiocloud/2build.md +++ b/content/learning-paths/microcontrollers/keilstudiocloud/2build.md @@ -26,30 +26,28 @@ To work with development boards over USB, you must use Keil Studio in a desktop You can either download or import example projects into Keil Studio from the **Projects** tab and get access to board details from the **Features** and **Documentation** tabs. -1. Find the **Blinky** example in the **Projects** tab and click the **Open in Keil Studio** button. - - ![Find and import Blinky #center](ksc_blinky_import.png "Find and import Blinky project") +1. Find the **hello** example in the **Projects** tab and click the **Open in Keil Studio** button. + ![Find and import hello #center](ksc_hello_import.png "Find and import hello project") ## Import and build an example project -1. Log into Keil Studio with your Arm or Mbed account if you are not already logged in. Keil Studio opens. Confirm the project name in the **Import Project** dialog box. Keil Studio sets the newly imported project as the active project by default. +1. Log into Keil Studio with your Arm or Mbed account if you are not already logged in. Keil Studio opens. Confirm the project name in the **Clone** dialog box. Keil Studio sets the newly imported project as the active project by default. -![Import dialog #center](ksc_import_project.png "Specify a name for the imported project") +![Import dialog #center](ksc_import_hello_project.png "Specify a name for the imported project") 2. Click **Add project**. The project loads to your workspace and is the active project. The README.md file of the project displays. Review the file to learn more about project settings and board requirements. ![Project imported #center](ksc_project_imported.png "First screen after project import") - -3. In the top-left corner, select the **Target hardware**. Set to **AVH (SSE-300-MPS3)**: +3. In the top-left corner, select the **Connected device**. Set to **Virtual Device - SSE-300-MPS3**: ![Target selection #center](ksc_target_selection.png "Select your target") -4. Use the build button (1) to build the project: +4. Use the build button to create a binary image: -![Build project #center](ksc_build_run.png "Build the project for your target") +![Build project #center](ksc_build.png "Build the project for your target") 5. 
The **Output** window shows the success of the operation: @@ -57,11 +55,11 @@ To work with development boards over USB, you must use Keil Studio in a desktop ## Run the example project -1. Use the run button (2) to run the project on Arm Virtual Hardware in your browser: +1. Use the play button to run the project on Arm Virtual Hardware in your browser: -![Build project #center](ksc_build_run.png "Build the project for your target") +![Build project #center](ksc_run.png "Run the project on your target") -2. Again, the **Output** window shows the success of the operation: +2. The **Task: Run Program** window shows the success of the operation: ![Output log #center](ksc_run_output_log.png "Output window logs operational success") @@ -69,15 +67,12 @@ To stop program execution, click the **Cancel** button. ## Manage Software Components -If you want to review or change the software components that are used in the project, open the **Manage Software Components** view by clicking button (3): +If you want to review or change the software components that are used in the project, open the **Manage Software Components** view by clicking the button: -![Build project #center](ksc_build_run.png "Build the project for your target") +![Build project #center](ksc_manage.png "Build the project for your target") The view opens and shows the currently selected software components: ![Manage Software Components #center](ksc_manage_sw_comp.png "Manage Software Components View") - Using the **Show selected only** toggle, you can switch between this short view and the full list of components. In the full list, enable or disable components as required. - - diff --git a/content/learning-paths/microcontrollers/keilstudiocloud/5misc.md b/content/learning-paths/microcontrollers/keilstudiocloud/5misc.md deleted file mode 100644 index e447d07a2..000000000 --- a/content/learning-paths/microcontrollers/keilstudiocloud/5misc.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -# User change -title: "Mbed OS projects" - -weight: 5 # 1 is first, 2 is second, etc. - -# Do not modify these elements -layout: "learningpathall" ---- -## Import an Mbed OS project - -Keil Studio is the successor to the Mbed Online Compiler, and allows you to develop Mbed OS 5 and 6 projects on supported Mbed Enabled boards. Keil Studio also provides limited support for Mbed 2. To get started, you can import Mbed projects from your Online Compiler workspace or mbed.com. - -For more information, see the Keil Studio [documentation](https://developer.arm.com/documentation/102497/latest/Tutorials/Get-started-with-an-Mbed-OS-Blinky-example). 
diff --git a/content/learning-paths/microcontrollers/keilstudiocloud/ksc_blinky_import.png b/content/learning-paths/microcontrollers/keilstudiocloud/ksc_blinky_import.png deleted file mode 100644 index fd471a8e2..000000000 Binary files a/content/learning-paths/microcontrollers/keilstudiocloud/ksc_blinky_import.png and /dev/null differ diff --git a/content/learning-paths/microcontrollers/keilstudiocloud/ksc_build.png b/content/learning-paths/microcontrollers/keilstudiocloud/ksc_build.png new file mode 100644 index 000000000..d7a90507c Binary files /dev/null and b/content/learning-paths/microcontrollers/keilstudiocloud/ksc_build.png differ diff --git a/content/learning-paths/microcontrollers/keilstudiocloud/ksc_build_output_log.png b/content/learning-paths/microcontrollers/keilstudiocloud/ksc_build_output_log.png index 063b77b44..f10b79ef6 100644 Binary files a/content/learning-paths/microcontrollers/keilstudiocloud/ksc_build_output_log.png and b/content/learning-paths/microcontrollers/keilstudiocloud/ksc_build_output_log.png differ diff --git a/content/learning-paths/microcontrollers/keilstudiocloud/ksc_build_run.png b/content/learning-paths/microcontrollers/keilstudiocloud/ksc_build_run.png deleted file mode 100644 index 09f4c6c02..000000000 Binary files a/content/learning-paths/microcontrollers/keilstudiocloud/ksc_build_run.png and /dev/null differ diff --git a/content/learning-paths/microcontrollers/keilstudiocloud/ksc_hello_import.png b/content/learning-paths/microcontrollers/keilstudiocloud/ksc_hello_import.png new file mode 100644 index 000000000..f3b519ddb Binary files /dev/null and b/content/learning-paths/microcontrollers/keilstudiocloud/ksc_hello_import.png differ diff --git a/content/learning-paths/microcontrollers/keilstudiocloud/ksc_import_hello_project.png b/content/learning-paths/microcontrollers/keilstudiocloud/ksc_import_hello_project.png new file mode 100644 index 000000000..342ff34f0 Binary files /dev/null and b/content/learning-paths/microcontrollers/keilstudiocloud/ksc_import_hello_project.png differ diff --git a/content/learning-paths/microcontrollers/keilstudiocloud/ksc_import_project.png b/content/learning-paths/microcontrollers/keilstudiocloud/ksc_import_project.png deleted file mode 100644 index 8b683549f..000000000 Binary files a/content/learning-paths/microcontrollers/keilstudiocloud/ksc_import_project.png and /dev/null differ diff --git a/content/learning-paths/microcontrollers/keilstudiocloud/ksc_manage.png b/content/learning-paths/microcontrollers/keilstudiocloud/ksc_manage.png new file mode 100644 index 000000000..21e891a93 Binary files /dev/null and b/content/learning-paths/microcontrollers/keilstudiocloud/ksc_manage.png differ diff --git a/content/learning-paths/microcontrollers/keilstudiocloud/ksc_manage_sw_comp.png b/content/learning-paths/microcontrollers/keilstudiocloud/ksc_manage_sw_comp.png index e221a2555..8dfe335a3 100644 Binary files a/content/learning-paths/microcontrollers/keilstudiocloud/ksc_manage_sw_comp.png and b/content/learning-paths/microcontrollers/keilstudiocloud/ksc_manage_sw_comp.png differ diff --git a/content/learning-paths/microcontrollers/keilstudiocloud/ksc_project_imported.png b/content/learning-paths/microcontrollers/keilstudiocloud/ksc_project_imported.png index 1c333bbf7..e82cdbb74 100644 Binary files a/content/learning-paths/microcontrollers/keilstudiocloud/ksc_project_imported.png and b/content/learning-paths/microcontrollers/keilstudiocloud/ksc_project_imported.png differ diff --git 
a/content/learning-paths/microcontrollers/keilstudiocloud/ksc_run.png b/content/learning-paths/microcontrollers/keilstudiocloud/ksc_run.png
new file mode 100644
index 000000000..51842341c
Binary files /dev/null and b/content/learning-paths/microcontrollers/keilstudiocloud/ksc_run.png differ
diff --git a/content/learning-paths/microcontrollers/keilstudiocloud/ksc_run_output_log.png b/content/learning-paths/microcontrollers/keilstudiocloud/ksc_run_output_log.png
index 0ff6fcffb..089f7b0ba 100644
Binary files a/content/learning-paths/microcontrollers/keilstudiocloud/ksc_run_output_log.png and b/content/learning-paths/microcontrollers/keilstudiocloud/ksc_run_output_log.png differ
diff --git a/content/learning-paths/microcontrollers/keilstudiocloud/ksc_target_selection.png b/content/learning-paths/microcontrollers/keilstudiocloud/ksc_target_selection.png
index ee636cd2a..5d6343d93 100644
Binary files a/content/learning-paths/microcontrollers/keilstudiocloud/ksc_target_selection.png and b/content/learning-paths/microcontrollers/keilstudiocloud/ksc_target_selection.png differ
diff --git a/content/learning-paths/microcontrollers/mlek/_index.md b/content/learning-paths/microcontrollers/mlek/_index.md
index 4f74639c2..4a0460243 100644
--- a/content/learning-paths/microcontrollers/mlek/_index.md
+++ b/content/learning-paths/microcontrollers/mlek/_index.md
@@ -17,6 +17,9 @@ author_primary: Ronan Synnott
### RS: Learning Path hidden until AWS instance updated
draft: true
+cascade:
+  draft: true
+
### Tags
skilllevels: Introductory
diff --git a/content/learning-paths/servers-and-cloud-computing/PMUv3_plugin_learning_path/_index.md b/content/learning-paths/servers-and-cloud-computing/PMUv3_plugin_learning_path/_index.md
index aaf7c2a88..f36ca1cad 100644
--- a/content/learning-paths/servers-and-cloud-computing/PMUv3_plugin_learning_path/_index.md
+++ b/content/learning-paths/servers-and-cloud-computing/PMUv3_plugin_learning_path/_index.md
@@ -1,9 +1,10 @@
---
-title: Code level Performance Analysis using the PMUv3 plugin
-draft: true
+
+title: Implement Code-level Performance Analysis using the PMUv3 plugin
+
minutes_to_complete: 60
 
-who_is_this_for: Engineers who want to do C/C++ performance analysis by instrumenting code at the block level.
+who_is_this_for: Engineers who want to carry out C/C++ performance analysis by instrumenting code at the block level.
 
learning_objectives:
- Generate a fine-grained, precise measurement of functions and other sections of code.
diff --git a/content/learning-paths/servers-and-cloud-computing/PMUv3_plugin_learning_path/before-you-begin.md b/content/learning-paths/servers-and-cloud-computing/PMUv3_plugin_learning_path/before-you-begin.md
index 78f5346e0..1a188ccca 100644
--- a/content/learning-paths/servers-and-cloud-computing/PMUv3_plugin_learning_path/before-you-begin.md
+++ b/content/learning-paths/servers-and-cloud-computing/PMUv3_plugin_learning_path/before-you-begin.md
@@ -10,27 +10,27 @@ To get started, navigate to an empty directory on your Arm Linux computer and pr
 
## User space PMU access
 
-To use the PMUv3 plugin, you need permission to access to the performance counters from userspace applications.
+To use the PMUv3 plugin, you need permission to access the performance counters from userspace applications.
-To enable userspace access until the next reboot run:
+To enable userspace access until the next reboot, run the following:
 
```console
sudo sysctl kernel/perf_user_access=1
```
 
-If access is allowed, the command output is:
+If access is allowed, the command output is as follows:
 
```output
kernel.perf_user_access = 1
```
 
-You can check if userspace access is enabled any time by running:
+You can check if userspace access is enabled any time by running the following:
 
```console
cat /proc/sys/kernel/perf_user_access
```
 
-A value of 1 means userspace access is enabled and value of 0 indicates disabled.
+A value of 1 means userspace access is enabled and a value of 0 indicates that it's disabled.
 
To permanently change the value, add the following line to the file `/etc/sysctl.conf`:
@@ -40,7 +40,7 @@ kernel.perf_user_access = 1
 
## Directory structure
 
-The instructions assume you have the Linux kernel source tree, the PMUv3 plugin source code, and your test application in parallel. If you have a different directory structure you may need to adjust the build commands to find the header files and libraries.
+The instructions assume you have the Linux kernel source tree, the PMUv3 plugin source code, and your test application in parallel. If you have a different directory structure, you may need to adjust the build commands to find the header files and libraries.
 
Here are the 3 directories you will create:
@@ -56,7 +56,7 @@ The PMUv3 plugin requires two Linux Perf related libraries.
 
The easiest way to get them is to build them from the Linux source tree.
 
-Download the Linux source using the `git` command:
+Download the Linux source using the `git` command as follows:
 
```console
git clone git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
@@ -64,13 +64,13 @@ git clone git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
 
The Linux kernel repository is large so it will take some time to download.
 
-Install the GNU compiler. If you are running on Ubuntu you can run:
+Install the GNU compiler. If you are running on Ubuntu, you can run the following:
 
```console
sudo apt install build-essential -y
```
 
-When the Linux source download is complete, build the Perf libraries, `libperf.a` and `libapi.a`:
+When the Linux source download is complete, build the Perf libraries `libperf.a` and `libapi.a`:
 
```console
pushd linux/tools/lib/perf
@@ -86,7 +86,7 @@ Get the PMUv3 plugin source code by running:
 
git clone https://github.com/GayathriNarayana19/PMUv3_plugin.git
```
 
-Copy the Perf libs.
+Copy the Perf libs:
 
```console
cd PMUv3_plugin
@@ -106,9 +106,9 @@ ar rcs libpmuv3_plugin_bundle.a pmuv3_plugin_bundle.o processing.o
ar rcs libpmuv3_plugin_bundle.a pmuv3_plugin_bundle.o processing_c.o
```
 
-To do the static library compilation, run ./build.sh from /home/ubuntu/ut_integration/PMUv3_plugin/directory.
-Run ./build.sh if you are going to instrument around a C++ codebase. If it is a C codebase, then comment line 19 of build.sh and uncomment line 20 and run ./build.sh
+To do the static library compilation, run `./build.sh` from the **/home/ubuntu/ut_integration/PMUv3_plugin/** directory.
+Run `./build.sh` if you are going to instrument around a C++ codebase. If it is a C codebase, comment line 19 of `build.sh`, uncomment line 20, and run `./build.sh`.
 
-You are now ready to use the PMUv3 plugin in your software project. You need to add the library `-lpmuv3_plugin_bundle.a` to your C/C++ link command. You can use `-I` to point to the plugin include files and `-L` to point to the library location.
+You are now ready to use the PMUv3 plugin in your software project. You will now need to add the library flag `-lpmuv3_plugin_bundle` to your C/C++ link command. You can use `-I` to point to the plugin include files and `-L` to point to the library location.
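+As a sketch, a typical link command looks like the following. Here `my_app.c` is a stand-in for your own source file, and the relative paths assume the parallel directory layout described above:
+
+```console
+gcc -I ../linux/tools/lib/perf/include -I ../PMUv3_plugin/ my_app.c -o my_app -L ../PMUv3_plugin/ -lpmuv3_plugin_bundle -lperf -lapi -lm
+```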
-Continue to see the options for instrumenting code.
+Continue to the next section to see the options for instrumenting code.
diff --git a/content/learning-paths/servers-and-cloud-computing/PMUv3_plugin_learning_path/instrumentation.md b/content/learning-paths/servers-and-cloud-computing/PMUv3_plugin_learning_path/instrumentation.md
index 96856b087..b7412bd3e 100644
--- a/content/learning-paths/servers-and-cloud-computing/PMUv3_plugin_learning_path/instrumentation.md
+++ b/content/learning-paths/servers-and-cloud-computing/PMUv3_plugin_learning_path/instrumentation.md
@@ -12,13 +12,13 @@ The instrumentation scenarios are listed below, covering the most common situati
 
So far you have the Linux kernel source tree and the PMUv3 plugin source code.
 
-Next, create a third directory to learn how to integrate the PMUv3 plugin into an application.
+Next, create a third directory to learn how to integrate the PMUv3 plugin into an application as follows:
 
```console
cd ../ ; mkdir test ; cd test
```
 
-The instructions assume you have all three directories in parallel. If you have a different directory structure you may need to adjust the build commands to find the header files and libraries.
+The instructions assume you have all three directories in parallel. If you have a different directory structure, you may need to adjust the build commands to find the header files and libraries.
 
Here are the 3 directories you now have:
@@ -27,7 +27,6 @@ Here are the 3 directories you now have:
./PMUv3_plugin
./test
```
-
You can use the test directory to try out the integration scenarios.
 
## Instrumenting a single code block in C
@@ -42,7 +41,7 @@ The general process to instrument code includes the following steps:
- Write the collected data to a CSV file by calling `post_process()` with the same bundle number
- Clean up with `shutdown_resources()`
 
-As an example, use a text editor to create a file `test1.c` in the `test` directory with the contents below.
+As an example, use a text editor to create a file `test1.c` in the `test` directory with the contents below:
 
```C
#include
@@ -103,10 +102,9 @@ int main(int argc, char **argv) {
 return EXIT_SUCCESS;
}
```
-
The include files and function calls are added in the code to provide the performance instrumentation.
 
-Build the application:
+Build the application as follows:
 
```console
gcc -I ../linux/tools/lib/perf/include -I ../PMUv3_plugin/ test1.c -o test1 -L ../PMUv3_plugin/ -lpmuv3_plugin_bundle -lperf -lapi -lm
@@ -118,7 +116,7 @@ sudo ./test1 4
```
 
-The output prints:
+The output prints the following:
 
```output
- running pmuv3_plugin_bundle.c...OK
@@ -135,11 +133,33 @@ Display the text file to see the contents:
cat bundle4.csv
```
 
-The data shows the metrics on the first line and the values on the second line.
+The data shows the metrics on the first line and the values on the second line as shown below:
 
```output
CPU_CYCLES,STALL_FRONTEND,STALL_BACKEND
1285367,68386,278994
```
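+For a quick sanity check, you can turn the raw counts into stall percentages directly from the shell. This one-liner assumes the two-line CSV layout shown above:
+
+```console
+# Derive stall percentages from the CPU_CYCLES,STALL_FRONTEND,STALL_BACKEND line
+awk -F, 'NR==2 { printf "frontend stalls: %.1f%%, backend stalls: %.1f%%\n", 100*$2/$1, 100*$3/$1 }' bundle4.csv
+```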
-The next section explains how to instrument multiple sections of code.
\ No newline at end of file
+## Collect data for all bundles
+
+You can quickly collect the data for all bundles. Save the code below in a file named `run.sh`:
+
+```console
+#!/bin/bash
+
+for i in {0..14}
+do
+  echo $i
+  sudo ./test1 $i
+done
+```
+
+Run the script:
+```console
+bash ./run.sh
+```
+
+All 15 of the bundle CSV files have been generated.
+
+Next, learn how you can visualize the data.
+
diff --git a/content/learning-paths/servers-and-cloud-computing/PMUv3_plugin_learning_path/instrumentation2.md b/content/learning-paths/servers-and-cloud-computing/PMUv3_plugin_learning_path/instrumentation2.md
index 597c4211f..30d5297eb 100644
--- a/content/learning-paths/servers-and-cloud-computing/PMUv3_plugin_learning_path/instrumentation2.md
+++ b/content/learning-paths/servers-and-cloud-computing/PMUv3_plugin_learning_path/instrumentation2.md
@@ -1,6 +1,6 @@
---
title: Instrument multiple sections of code
-weight: 5
+weight: 6
 
### FIXED, DO NOT MODIFY
layout: learningpathall
@@ -8,15 +8,13 @@ layout: learningpathall
 
You can also instrument multiple sections of code.
 
-
-
## Instrumenting multiple code blocks in C
 
-The second scenario is to instrument a single section of code in C.
+The next scenario is to instrument multiple sections of code in C.
 
The API is slightly different, but the concept is the same.
 
-For multiple code segments the first two steps and cleanup are the same, but the start and stop functions are slightly different because they include markers to indicate which segment you are profiling.
+For multiple code segments, the first two steps and cleanup are the same, but the start and stop functions are slightly different because they include markers to indicate which segment you are profiling.
 
Here are the steps for multiple segments:
- Include 2 header files (same)
@@ -27,11 +25,11 @@ Here are the steps for multiple segments:
- Write the collected data to a CSV file by calling `process_data()` with the same bundle number
- Clean up with `shutdown_resources()`
 
-You can repeat for additional segments, but getting the next segment number and using the start and stop functions again.
+You can repeat for additional segments by getting the next segment number and using the start and stop functions again.
 
-The example below collects separate for the `initialize_vectors()` function and the `calculate_result()` functions instead of collecting the data for both of them as in the previous example.
+The example below collects data for the `initialize_vectors()` and `calculate_result()` functions separately, instead of collecting the data for both of them together as in the previous example.
 
-Use a text editor to create a file `test2.c` in the test directory with the contents below.
+Use a text editor to create a file `test2.c` in the test directory with the contents below:
 
```C
#include
@@ -106,7 +104,7 @@ Build the application:
 
gcc -I ../linux/tools/lib/perf/include -I ../PMUv3_plugin/ test2.c -o test2 -L ../PMUv3_plugin/ -lpmuv3_plugin_bundle -lperf -lapi -lm
```
 
-Run the application and pass the bundle number of 3 (to capture stall information):
+Run the application and pass the bundle number of 3 (to capture the stall information):
 
```console
sudo ./test2 3
@@ -150,29 +148,4 @@ SECTION_1,60569,254,10871,0,0,0,0
SECTION_2,7413,22,1917,0,0,0,0
```
 
-## Collect data for all bundles
-
-You can quickly collect the data for all bundles by passing
-
-Save the code below in a file named `run.sh`.
-
-```console
-#!/bin/bash
-
-for i in {0..14}
-do
-  echo $i
-  sudo ./test2 $i
-done
-```
-
-Run the script:
-
-```console
-bash ./run.sh
-```
-
-All 15 of the bundle CSV files are generated.
-
-Next, learn how you can visualize the data.
-
+You can use this methodology to instrument multiple sections of code and generate the data for all bundles by modifying the `run.sh` file from the single-section instrumentation. All you need to do is change the command from `test1` to `test2` and invoke the `run.sh` script again.
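+For example, with that one change the script for the multi-segment build looks like this:
+
+```console
+#!/bin/bash
+
+# Run test2 once per bundle (0-14) to generate all 15 CSV files
+for i in {0..14}
+do
+  echo $i
+  sudo ./test2 $i
+done
+```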
diff --git a/content/learning-paths/servers-and-cloud-computing/PMUv3_plugin_learning_path/intro.md b/content/learning-paths/servers-and-cloud-computing/PMUv3_plugin_learning_path/intro.md
index df5316b94..ad4e4392c 100644
--- a/content/learning-paths/servers-and-cloud-computing/PMUv3_plugin_learning_path/intro.md
+++ b/content/learning-paths/servers-and-cloud-computing/PMUv3_plugin_learning_path/intro.md
@@ -8,7 +8,7 @@ layout: learningpathall
 
## Why should you use the PMUv3 plugin?
 
-Many tools allow you to profile a complete application, but sometimes you need to analyze specific code sections in order to investigate performance. When you need precise measurement of individual functions or sections of code implementing a specific task you can use the PMUv3 plugin.
+Many tools allow you to profile a complete application, but sometimes you need to analyze specific code sections in order to investigate performance. When you need precise measurement of individual functions or sections of code implementing a specific task, you can use the PMUv3 plugin.
 
The PMUv3 plugin uses C/C++ code instrumentation and the hardware events available in the Arm PMUv3 architecture to provide this functionality.
@@ -18,130 +18,18 @@ To access the performance counter registers directly, the instrumentation code u
The PMUv3 plugin requires you to run applications with sudo or as root to access the performance counters.
{{% /notice %}}
 
-The PMUv3 plugin provides an easy way to measure CPU Cycle counts as well as more complex scenarios to measure different bundles of events in one run, such as multiple cache metrics along with CPU cycles.
+The PMUv3 plugin provides an easy way to measure CPU cycle counts as well as more complex scenarios to measure different bundles of events in one run, such as multiple cache metrics along with CPU cycles. It not only records the values of raw counter registers but also provides support to visualize the results in CSV format using a post-processing program.
 
-The PMUv3 plugin not only records values of raw counter registers but also provides support to visualize the results in a CSV format using a post-processing program.
-
-The source code for the PMUv3 plugin is written in C. You can call the APIs from a C codebase by including the header file. For a C++ codebase, you can include the headers using the `extern` keyword.
+The source code for the PMUv3 plugin is written in C and you can call the APIs from a C codebase by including the header file. For a C++ codebase, you can include the headers using the `extern` keyword.
 
## Features of the PMUv3 plugin
 
-The PMUv3 plugin groups performance events together into categories called bundles. There are 15 categories (bundles), and each bundle has a set of PMU events.
+The PMUv3 plugin groups performance events together into categories called bundles. There are 15 categories (bundles), and each bundle has a set of PMU events.
 
-The events in each bundle and the derived performance metrics are shown below:
+The events in each bundle and the derived performance metrics are shown in the table below:
 
![example image alt-text#center](bundles.png "Table 1.
Bundled Events") Next, learn how to get the PMUv3 plugin and use it in an application. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/content/learning-paths/servers-and-cloud-computing/PMUv3_plugin_learning_path/plot.md b/content/learning-paths/servers-and-cloud-computing/PMUv3_plugin_learning_path/plot.md index ff997a539..2d38139c2 100644 --- a/content/learning-paths/servers-and-cloud-computing/PMUv3_plugin_learning_path/plot.md +++ b/content/learning-paths/servers-and-cloud-computing/PMUv3_plugin_learning_path/plot.md @@ -1,6 +1,6 @@ --- title: Plot, visualize, and analyze the results -weight: 7 # _index.md always has weight of 1 to order correctly +weight: 5 # _index.md always has weight of 1 to order correctly ### FIXED, DO NOT MODIFY layout: "learningpathall" @@ -37,7 +37,7 @@ Download the Python application code to plot and analyze results: git clone https://github.com/GayathriNarayana19/Performance_Analysis_Backend.git ``` -Copy the code below into a file named `config.yaml` in your `test/` directory which contains your CSV files. +Copy the code below into a file named `config.yaml` in your `test/` directory which contains your CSV files: ```yaml base_dirs: @@ -48,7 +48,7 @@ base_filename: 'bundle{}.csv' num_bundles: 15 scenarios: - "test1: section1" -context: 'SECTION_1' +title: 'Section1' #########DO NOT MODIFY BELOW THIS LINE########## kpi_metrics: @@ -100,11 +100,12 @@ kpi_file_groups: - ["bundle8.csv", "bundle9.csv", "bundle14.csv"] ``` -Run the Python application to create the performance plots. +Run the Python application to create the performance plots as follows: ```console -python3 ../Performance_Analysis_Backend/PMUv3_Backend/pmuv3_plotting.py -config config.yaml +python3 Performance_Analysis_Backend/PMUv3_Backend/pmuv3_plotting.py -config config.yaml ``` -Look in the `test_plotting/` directory for the PDF file with the results. +Look in the `test_plotting/` directory for a CSV file and the PDF files with the results. +The next section explains how to instrument multiple sections of code. diff --git a/content/learning-paths/servers-and-cloud-computing/codec1/_index.md b/content/learning-paths/servers-and-cloud-computing/codec1/_index.md index 4760f7f09..96eb18398 100644 --- a/content/learning-paths/servers-and-cloud-computing/codec1/_index.md +++ b/content/learning-paths/servers-and-cloud-computing/codec1/_index.md @@ -1,6 +1,9 @@ --- title: Run the AV1 and VP9 codecs on Arm Linux draft: true +cascade: + draft: true + author_primary: Odin Shen minutes_to_complete: 30 diff --git a/content/learning-paths/servers-and-cloud-computing/java-on-axion/_index.md b/content/learning-paths/servers-and-cloud-computing/java-on-axion/_index.md index 76e9aab30..425b8702a 100644 --- a/content/learning-paths/servers-and-cloud-computing/java-on-axion/_index.md +++ b/content/learning-paths/servers-and-cloud-computing/java-on-axion/_index.md @@ -1,6 +1,9 @@ --- title: Run Java applications on Google Axion processors draft: true +cascade: + draft: true + minutes_to_complete: 20 who_is_this_for: This is an introductory topic for software developers who want to learn how to run their Java-based applications on Arm-based Google Axion processors in Google Cloud. Most Java applications will run on Axion with no changes needed, but there are optimizations that can help improve application performance on Axion. 
diff --git a/content/learning-paths/servers-and-cloud-computing/nginx/install_from_package.md b/content/learning-paths/servers-and-cloud-computing/nginx/install_from_package.md
index f8707c1cc..0c9454084 100644
--- a/content/learning-paths/servers-and-cloud-computing/nginx/install_from_package.md
+++ b/content/learning-paths/servers-and-cloud-computing/nginx/install_from_package.md
@@ -12,7 +12,7 @@ If you plan to build Nginx from source, you can skip to the next section. Howeve
 
## About Nginx documentation
 
-There are two sets of Nginx documentation. [Documentation](https://nginx.org/en/docs/) on [nginx.org](https://nginx.org) and [documentation](https://docs.nginx.com/nginx/) on [nginx.com](https://www.nginx.com/). The nginx.org documentation covers the open source version of Nginx and the nginx.com documentation covers Nginx Plus. Even if you are working with the open source version, you should explore the documentation on nginx.com and the helpful [Admin Guide](https://docs.nginx.com/nginx/admin-guide/).
+There are two sets of Nginx documentation: the [documentation](https://nginx.org/en/docs/) on [nginx.org](https://nginx.org), and the [Nginx Plus documentation](https://docs.nginx.com/nginx/) linked from [www.f5.com/products/nginx/nginx-plus](https://www.f5.com/products/nginx/nginx-plus). The nginx.org documentation covers the open source version of Nginx and the docs.nginx.com documentation covers Nginx Plus. Even if you are working with the open source version, you should explore the documentation on docs.nginx.com and the helpful [Admin Guide](https://docs.nginx.com/nginx/admin-guide/).
 
You can explore the documentation to gain key insights that will help with deployment, configuration, and performance.
diff --git a/content/learning-paths/servers-and-cloud-computing/postgresql_tune/before_and_after.md b/content/learning-paths/servers-and-cloud-computing/postgresql_tune/before_and_after.md
index 3e417f7d5..c88dfdb33 100644
--- a/content/learning-paths/servers-and-cloud-computing/postgresql_tune/before_and_after.md
+++ b/content/learning-paths/servers-and-cloud-computing/postgresql_tune/before_and_after.md
@@ -6,7 +6,7 @@ layout: "learningpathall"
 
## About database performance tuning
 
-Deployment configurations and the profile of SQL requests made by client will differ based on the use case. This means there is no one size fits all set of tuning parameters for `PostgreSQL`. Use the information in this learning path to help you tune `PostgreSQL` for your use case.
+Deployment configurations and the profile of SQL requests made by clients will differ based on the use case. This means there is no one-size-fits-all set of tuning parameters for `PostgreSQL`. Use the information in this Learning Path as guidance to help you tune `PostgreSQL` for your use case. Make sure to test any changes made to tuning parameters.
 
## Importance of tuning
diff --git a/content/learning-paths/servers-and-cloud-computing/postgresql_tune/kernel_comp_lib.md b/content/learning-paths/servers-and-cloud-computing/postgresql_tune/kernel_comp_lib.md
index 011a92bf6..d48596c11 100644
--- a/content/learning-paths/servers-and-cloud-computing/postgresql_tune/kernel_comp_lib.md
+++ b/content/learning-paths/servers-and-cloud-computing/postgresql_tune/kernel_comp_lib.md
@@ -134,18 +134,20 @@ More information on the different parameters that affect the configuration of hu
 
`PostgreSQL` writes data to files like any Linux process does. The behavior of the page cache can affect performance.
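+Before changing anything, you can inspect the kernel's current page cache writeback settings (described below) with a read-only command:
+
+```console
+# Print the current values without modifying them
+sysctl vm.dirty_background_ratio vm.dirty_ratio
+```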
There are two sysctl parameters that control how often the kernel flushes the page cache data to disk.
-- `vm.dirty_background_ratio`
-- `vm.dirty_ratio`
+- `vm.dirty_background_ratio=5`
+- `vm.dirty_ratio=80`
The `vm.dirty_background_ratio` sets the percentage of the page cache that needs to be dirty in order for a flush to disk to start in the background. Setting this value lower than the default (typically 10) helps write-heavy workloads. This is because by lowering this threshold, you are spreading writes to storage over time. This reduces the probability of saturating storage.
+Setting this value to 5 can improve performance.
+
The `vm.dirty_ratio` sets the percentage of the page cache that needs to be dirty in order for threads that are writing to storage to be paused to allow flushing to catch up. Setting this value higher than the default (typically 10-20) helps performance when disk writes are bursty. A higher value gives the background flusher (controlled by `vm.dirty_background_ratio`) more time to catch up.
-Setting this as high as 80 can help performance.
+Setting this as high as 80 can improve performance.
## Compiler Considerations
diff --git a/content/learning-paths/servers-and-cloud-computing/postgresql_tune/tuning.md b/content/learning-paths/servers-and-cloud-computing/postgresql_tune/tuning.md index 550fdf70c..d919604cb 100644 --- a/content/learning-paths/servers-and-cloud-computing/postgresql_tune/tuning.md +++ b/content/learning-paths/servers-and-cloud-computing/postgresql_tune/tuning.md @@ -19,7 +19,7 @@ The configurations below can be directly pasted into a `PostgreSQL` configuratio ### Connections and prepared transactions
```output
-max_connections = 1000 # Default 100
+max_connections = 1000            # Default 100
max_prepared_transactions = 1000  # Default 0
```
@@ -29,7 +29,7 @@ Keep in mind that more client connections means more resources will be consumed `max_prepared_transactions` is 0 by default.
-This means that stored procedures and functions cannot be used out of the box. It must be enabled by setting `max_prepared_transactions` to a value greater than 0. If this is set to a number larger than 0, a good number to start with would be at least as large as `max_connections`.
+This means that stored procedures and functions cannot be used out of the box. It must be enabled by setting `max_prepared_transactions` to a value greater than 0. If this is set to a number larger than 0, a good number to start with would be at least as large as `max_connections`. In a test or development environment, it doesn't hurt to set it to an even larger value (10,000) to avoid errors.
Using procedures and functions can greatly improve performance.
@@ -55,7 +55,7 @@ However, you can explicitly set it to `on` because errors will be produced if hu ### Processing and process count
```output
-deadlock_timeout = 10s # Default is 1s
+deadlock_timeout = 10s   # Default is 1s
max_worker_processes =   # Default is 8
```
@@ -67,13 +67,14 @@ max_worker_processes = # Default is 8
```output
synchronous_commit = off # Default is on
-max_wal_size = 20GB # Default is 1GB
-wal_recycle = off # Default is on
+max_wal_size = 20GB      # Default is 1GB
+min_wal_size = 1GB       # Default is 80MB
+wal_recycle = off        # Default is on
```
If `synchronous_commit` is on (default), it tells the WAL processor to wait until more of the log is applied before reporting success to clients. Turning this off means that the PostgreSQL instance will report success to clients sooner. This will result in a performance improvement.
It is safe to turn this off in most cases, but keep in mind that it will increase the risk of losing transactions if there is a crash. However, it will not increase the risk of data corruption.
-In high load scenarios, check pointing can happen very often. In fact, in testing with HammerDB, there may be so much check pointing that PostgreSQL reports warnings. One way to reduce how often check pointing occurs is to increase the `max_wal_size` of the WAL log. Setting it to 20GB can make the excessive check pointing warnings go away.
+In high-load scenarios, checkpointing can happen very often. In fact, in testing with HammerDB, there may be so much checkpointing that PostgreSQL reports warnings. One way to reduce how often checkpointing occurs is to increase the `max_wal_size` of the WAL log. Setting it to 20GB can make the excessive checkpointing warnings go away. `min_wal_size` can also be increased to help absorb spikes in WAL log usage under high load.
`wal_recycle` does not impact performance. However, in scenarios where a large amount of data is being loaded (for example, restoring a database), turning this off will speed up the data load and reduce the chances of replication errors occurring if streaming replication is used.
@@ -119,4 +120,4 @@ effective_io_concurrency = 300 # Default is 1
Doubling `max_parallel_workers_per_gather` and `max_parallel_maintenance_workers` to 4 seems to provide the most benefit.
-`effective_io_concurrency` affects how many parallel IO requests you can send to storage. Modern storage technologies tend to allow a large number of IOPS. Thus, setting this higher is advised. Also note, this parameter only affects bitmap heap scans. A bitmap heap scan is an "in between" method for processing a query. That is, while Index scans (and Index only scans) are typically the fastest way to access data, and sequential scans are typically the slowest way to access data. A bitmap heap scan is in between these extremes. \ No newline at end of file
+`effective_io_concurrency` affects how many parallel IO requests you can send to storage. Modern storage technologies tend to allow a large number of IOPS, so setting this higher is advised. Also note that this parameter only affects bitmap heap scans. A bitmap heap scan is an "in between" method for processing a query: index scans (and index-only scans) are typically the fastest way to access data, sequential scans are typically the slowest, and a bitmap heap scan falls between these extremes.
diff --git a/content/learning-paths/servers-and-cloud-computing/serverless-framework-aws-intro/_index.md b/content/learning-paths/servers-and-cloud-computing/serverless-framework-aws-intro/_index.md new file mode 100644 index 000000000..7526e52d0 --- /dev/null +++ b/content/learning-paths/servers-and-cloud-computing/serverless-framework-aws-intro/_index.md @@ -0,0 +1,35 @@
+---
+title: Deploy AWS services using the Serverless Framework
+
+minutes_to_complete: 30
+
+who_is_this_for: This learning path is for software developers interested in learning how to deploy AWS cloud resources using the Serverless Framework.
+
+learning_objectives:
+  - Learn how to set up the Serverless Framework for AWS.
+  - Create a project and deploy an AWS Lambda function.
+
+prerequisites:
+  - A Windows on Arm computer such as [Windows Dev Kit 2023](https://learn.microsoft.com/en-us/windows/arm/dev-kit), a Lenovo Thinkpad X13s running Windows 11, or a Windows on Arm [virtual machine](/learning-paths/cross-platform/woa_azure/).
+ - Any code editor. [Visual Studio Code for Arm64](https://code.visualstudio.com/docs/?dv=win32arm64user) is suitable. + +author_primary: Dawid Borycki + +### Tags +skilllevels: Introductory +subjects: Cloud +cloud_service_providers: Amazon Web Services + +armips: + - Neoverse + +tools_software_languages: + - Node.js + - Visual Studio Code + +operatingsystems: + - Windows + + +### FIXED, DO NOT MODIFY +# ================================================================================ +weight: 1 # _index.md always has weight of 1 to order correctly +layout: "learningpathall" # All files under learning paths have this same wrapper +learning_path_main_page: "yes" # This should be surfaced when looking for related content. Only set for _index.md of learning path content. +--- diff --git a/content/learning-paths/servers-and-cloud-computing/serverless-framework-aws-intro/_next-steps.md b/content/learning-paths/servers-and-cloud-computing/serverless-framework-aws-intro/_next-steps.md new file mode 100644 index 000000000..4383b08cd --- /dev/null +++ b/content/learning-paths/servers-and-cloud-computing/serverless-framework-aws-intro/_next-steps.md @@ -0,0 +1,40 @@ +--- +# ================================================================================ +# Edit +# ================================================================================ + +next_step_guidance: > + You can continue learning about how to deploy serverless applications using the Serverless Framework and AWS. +# 1-3 sentence recommendation outlining how the reader can generally keep learning about these topics, and a specific explanation of why the next step is being recommended. + +recommended_path: "/learning-paths/servers-and-cloud-computing/serverless-framework-aws-lambda-dynamodb" +# Link to the next learning path being recommended(For example this could be /learning-paths/servers-and-cloud-computing/mongodb). + + +# further_reading links to references related to this path. Can be: + # Manuals for a tool / software mentioned (type: documentation) + # Blog about related topics (type: blog) + # General online references (type: website) + +further_reading: + - resource: + title: Serverless Framework + link: https://www.serverless.com + type: website + - resource: + title: Serverless Framework documentation + link: https://www.serverless.com/framework/docs + type: Documentation + - resource: + title: AWS Lambda + link: https://aws.amazon.com/lambda/ + type: Documentation + + +# ================================================================================ +# FIXED, DO NOT MODIFY +# ================================================================================ +weight: 21 # set to always be larger than the content in this path, and one more than 'review' +title: "Next Steps" # Always the same +layout: "learningpathall" # All files under learning paths have this same wrapper +--- diff --git a/content/learning-paths/servers-and-cloud-computing/serverless-framework-aws-intro/_review.md b/content/learning-paths/servers-and-cloud-computing/serverless-framework-aws-intro/_review.md new file mode 100644 index 000000000..9751fbef5 --- /dev/null +++ b/content/learning-paths/servers-and-cloud-computing/serverless-framework-aws-intro/_review.md @@ -0,0 +1,42 @@ +--- +review: + - questions: + question: > + Which command do you use to invoke the Lambda function using Serverless Framework? 
+ answers: + - serverless invoke + - serverless invoke lambda + - serverless invoke local --function + correct_answer: 3 + explanation: > + In Serverless Framework you use serverless invoke local --function to invoke the Lambda function + + - questions: + question: > + What is the serverless.yml file? + answers: + - It includes the structure and configuration options for setting up a Serverless service + - It provides AWS credentials + + correct_answer: 1 + explanation: > + The serverless.yml file defines a Serverless service. + + - questions: + question: > + Is the Serverless Framework compatible only with AWS? + answers: + - No + - Yes + + correct_answer: 1 + explanation: > + The Serverless Framework supports multiple cloud providers such as AWS, Google Cloud, and Microsoft Azure, providing a versatile and scalable solution for modern application development + +# ================================================================================ +# FIXED, DO NOT MODIFY +# ================================================================================ +title: "Review" # Always the same title +weight: 20 # Set to always be larger than the content in this path +layout: "learningpathall" # All files under learning paths have this same wrapper +--- diff --git a/content/learning-paths/servers-and-cloud-computing/serverless-framework-aws-intro/background.md b/content/learning-paths/servers-and-cloud-computing/serverless-framework-aws-intro/background.md new file mode 100644 index 000000000..3078d79bf --- /dev/null +++ b/content/learning-paths/servers-and-cloud-computing/serverless-framework-aws-intro/background.md @@ -0,0 +1,22 @@ +--- +title: Background +weight: 2 + +### FIXED, DO NOT MODIFY +layout: learningpathall +--- + +### What is the Serverless Framework? + +The Serverless Framework is an open-source toolkit that enables developers to build and deploy applications on cloud infrastructure without managing server operations. By abstracting away the underlying server management, it allows for greater focus on writing code and developing features, enhancing productivity and efficiency. The framework supports multiple cloud providers such as AWS, Google Cloud, and Microsoft Azure, providing a versatile and scalable solution for modern application development. With its powerful plug-ins and community support, the Serverless Framework simplifies complex deployment processes, promotes best practices, and facilitates rapid iteration, making it an essential tool in the DevOps landscape. + +A significant advantage of using the Serverless Framework is its use of Infrastructure as Code (IaC). IaC is a methodology that uses code to manage and provision the IT infrastructure required for applications. This approach allows developers to define their cloud infrastructure in a configuration file, such as `serverless.yml`, ensuring consistency and repeatability across different environments. By integrating IaC, the Serverless Framework allows teams to version control their infrastructure alongside their application code, reducing the risk of configuration drift and enabling seamless collaboration. + +Furthermore, IaC facilitates automated deployments and rollbacks, making it easier to maintain application stability and continuity. The Serverless Framework's IaC capabilities enable developers to describe their entire architecture, including AWS Lambda functions, API Gateway endpoints, DynamoDB tables, and more, in a unified and cohesive manner. 
This comprehensive approach not only streamlines the deployment process but also improves transparency and traceability, making it easier to manage complex systems and meet compliance requirements. Overall, the Serverless Framework and IaC allow development teams to build, deploy, and manage cloud-native applications more effectively and efficiently.
+
+A typical workflow with the Serverless Framework involves several key steps that streamline the development and deployment process. Initially, developers install the Serverless Framework CLI and set up their project by creating a new Serverless service. They then define the serverless configuration in the `serverless.yml` file, specifying the functions, events, and resources needed for the application. The development phase involves writing the business logic for the serverless functions, usually in languages like JavaScript, Python, or Go. Developers can test their functions and configurations locally using the Serverless CLI, which helps in catching errors before deployment.
+
+Once the code and configurations are ready, the deployment process is initiated using the `serverless deploy` command. This command packages the application, creates the necessary cloud resources, and deploys the functions and services to the specified cloud provider. After deployment, developers can monitor and manage their serverless applications using the Serverless Dashboard or cloud provider-specific tools. The framework supports continuous integration and continuous deployment (CI/CD) pipelines, enabling automated testing, deployment, and rollback of serverless applications. This structured workflow accelerates the development process and ensures a reliable and scalable deployment of serverless applications.
+
+In this Learning Path, you will learn how to set up the Serverless Framework for Amazon Web Services. Specifically, you will learn how to deploy a Lambda function. The main aim is to demonstrate how to automate the many manual tasks that you typically need to perform when provisioning cloud resources. You can use what you learn here to automate resource deployment for IoT solutions built using AWS.
+
diff --git a/content/learning-paths/servers-and-cloud-computing/serverless-framework-aws-intro/figures/01.png b/content/learning-paths/servers-and-cloud-computing/serverless-framework-aws-intro/figures/01.png new file mode 100644 index 000000000..7f9c44111 Binary files /dev/null and b/content/learning-paths/servers-and-cloud-computing/serverless-framework-aws-intro/figures/01.png differ
diff --git a/content/learning-paths/servers-and-cloud-computing/serverless-framework-aws-intro/lambda.md b/content/learning-paths/servers-and-cloud-computing/serverless-framework-aws-intro/lambda.md new file mode 100644 index 000000000..d862ebec5 --- /dev/null +++ b/content/learning-paths/servers-and-cloud-computing/serverless-framework-aws-intro/lambda.md @@ -0,0 +1,182 @@
+---
+title: Deploy AWS Lambda
+weight: 4
+
+### FIXED, DO NOT MODIFY
+layout: learningpathall
+---
+
+You are now ready to create a Serverless Framework project and deploy the Lambda function.
+
+## Create a project
+Open the terminal or command prompt and type:
+
+```console
+serverless
+```
+
+This will start the wizard, in which you first select a template. Use the arrow keys to select **AWS / Node.js / Simple Function** (as shown below) and then hit **Enter**:
+
+```output
+Welcome to Serverless Framework V.4
+
+Create a new project by selecting a Template to generate scaffolding for a specific use-case.
+ +? Select A Template: … + AWS / Node.js / HTTP API + AWS / Node.js / Express API + AWS / Node.js / Express API with DynamoDB + AWS / Node.js / Scheduled Task +❯ AWS / Node.js / Simple Function + AWS / Python / HTTP API + AWS / Python / Flask API + AWS / Python / Flask API with DynamoDB + AWS / Python / Scheduled Task + AWS / Python / Simple Function +``` + +Next, type the name for your project, e.g., **AwsServerlessLambda**. You will see the following output: + +```console +Name Your Project: · AwsServerlessLambda + +✔ Template Downloaded + +This Template contains a Serverless Framework Service. Services are stacks of AWS resources, and can contain your entire application or a part of it (e.g. users, comments, checkout, etc.). Enter a name using lowercase letters, numbers and hyphens only. + +? Serverless Framework V4 CLI is free for developers and organizations making less than $2 million annually, but requires an account or a license key. + +Please login/register or enter your license key: … +❯ Login/Register + Get A License + Enter A License Key + Explain Licensing Basics +``` + +Select **Login/Register**. This will open the web browser where you can create a Serverless Framework account. Then use this account to login. + +The wizard will display the following: +```console +Create or select an existing App below to associate with your Service, or skip. + +? Create Or Select An Existing App: … + Create A New App +❯ Skip Adding An App +``` +Select **Skip Adding An App**, and press Enter. You will see the following +```console +Your new Service "AwsServerlessLambda" is ready. Here are next steps: + +• Open Service Directory: cd AwsServerlessLambda +• Install Dependencies: npm install (or use another package manager) +• Deploy & Develop Your Service: serverless dev +``` + +The project is now ready for deployment. Before deploying it, let's review the project files. + +## Reviewing the project files +To review the project files created by the Serverless Framework CLI, navigate to the AwsServerlessLambda folder as follows: + +```console +cd AwsServerlessLambda +``` + +Then list the directory files: +```console +ls +``` + +You will see the following: +```output +README.md +handler.js +serverless.yml +``` + +Open and view the contents of the `serverless.yml` file: +```yml +# "org" ensures this Service is used with the correct Serverless Framework Access Key. +org: + +# "service" is the name of this project. This will also be added to your AWS resource names. +service: AwsServerlessLambda + +provider: + name: aws + runtime: nodejs20.x + +functions: + hello: + handler: handler.hello +``` + +This `serverless.yml` file contains the organization or account name within the Serverless Framework’s dashboard. It is used to group and manage services under a specific account in the Serverless Framework’s cloud platform. This is particularly useful for managing multiple projects or teams. + +The service keyword defines the name of your Serverless service. This name will be used as a prefix for all the AWS resources created by the framework. It helps in organizing and identifying resources associated with this particular service. + +The provider section specifies the cloud provider and the runtime environment for your service: +* name: aws. This indicates that the service will be deployed to Amazon Web Services (AWS). +* runtime: nodejs20.x. This specifies the runtime environment for your AWS Lambda functions. Here, nodejs20.x indicates that the functions will run using Node.js version 20.x. 
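+
+If you want more control over where and how the service is deployed, the provider section also accepts optional keys such as `region` and `stage`. Here is a small sketch; the values shown are illustrative, and the DynamoDB Learning Path that follows uses the same two keys:
+
+```yml
+provider:
+  name: aws
+  runtime: nodejs20.x
+  region: us-east-1   # AWS region to deploy to; the framework defaults to us-east-1 when omitted
+  stage: dev          # named deployment stage; defaults to dev when omitted
+```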
+ +Next, you have the functions section, which defines the AWS Lambda functions that are part of this service: +* hello. This is the name of the function. You can name it whatever you like, but in this case, it is named hello. +* handler: handler.hello. This specifies the handler method for the function. It follows the format file.method. **handler** is the name of the file (e.g., handler.js), and **hello** is the name of the exported function within that file. + +The `serverless.yml` file defines a Serverless service named AwsServerlessLambda under the specified organization. The service is configured to run on AWS with Lambda functions using the Node.js 20.x runtime. It declares a single Lambda function named **hello**, which is handled by the hello method in the `handler.js` file. + +Let's now open the handler.js: + +```JavaScript +exports.hello = async (event) => { + return { + statusCode: 200, + body: JSON.stringify({ + message: 'Go Serverless v4.0! Your function executed successfully!' + }) + }; +}; +``` + +This JavaScript code defines a simple AWS Lambda function handler named **hello**. When triggered, it returns an HTTP response with a status code of 200 and a JSON-formatted message indicating that the function executed successfully. This handler function is designed to be used in a serverless environment, such as one managed by the Serverless Framework, and can be easily expanded to include more complex logic and event processing. For example, you can use Lambda to interact with other AWS resources. + +## Deploy resources +After becoming familiar with the project, you can now deploy it to the AWS cloud. In the terminal or command prompt, type the following command: + +```console +serverless deploy +``` + +You will see the following: +```console +Deploying "AwsServerlessLambda" to stage "dev" (us-east-1) +``` + +Let the Serverless Framework deploy the resources. After a few moments you will see the following: + +```console +Service deployed to stack AwsServerlessLambda-dev (48s) + +functions: + hello: AwsServerlessLambda-dev-hello (1.5 kB) +``` + +To test the deployment use the following command: + +```console +serverless invoke local --function hello +``` + +You will see the following output: +```output +{ + "statusCode": 200, + "body": "{\"message\":\"Go Serverless v4.0! Your function executed successfully!\"}" +} +``` + +Finally, log into the AWS console. Change the region to us-east-1 (N. Virginia). Then, go to the Lambda dashboard to see the deployed Lambda function. It will be named *AwsServerlessLambda-dev-hello*: + +![fig1](figures/01.png) + +## Summary +In this Learning Path, you learned about the Serverless Framework, its benefits, and its integration with Infrastructure as Code (IaC) for managing cloud resources. You set up the Serverless Framework for AWS and created a project. We explained its structure, specifically focusing on the serverless.yml file, including the structure and configuration options for setting up a basic Serverless service with AWS Lambda. Finally, we explored the Lambda handler function and deployed the resources to AWS. 
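+
+If you no longer need this deployment, you can tear it down so that it does not linger in your AWS account. A minimal cleanup sketch, assuming you are still in the AwsServerlessLambda project directory; `serverless remove` deletes the stack and the resources that the deployment created:
+
+```console
+serverless remove
+```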
diff --git a/content/learning-paths/servers-and-cloud-computing/serverless-framework-aws-intro/setup.md b/content/learning-paths/servers-and-cloud-computing/serverless-framework-aws-intro/setup.md new file mode 100644 index 000000000..8e7f91a15 --- /dev/null +++ b/content/learning-paths/servers-and-cloud-computing/serverless-framework-aws-intro/setup.md @@ -0,0 +1,32 @@
+---
+title: Set up Serverless Framework for AWS
+weight: 3
+
+### FIXED, DO NOT MODIFY
+layout: learningpathall
+---
+
+In this section you will set up the Serverless Framework for AWS. This involves several steps, including installing the Serverless Framework, configuring AWS credentials, and creating a new Serverless service.
+
+## Installation
+Start by installing Node.js version 18.20.3 or greater and npm (Node Package Manager). You can download and install them from the [official Node.js website](https://nodejs.org/en).
+
+Then, open the terminal or command prompt and type the following:
+```console
+npm install -g serverless
+```
+
+## AWS Credentials
+You need AWS credentials to deploy your Serverless application to AWS. You can create these credentials in the AWS Management Console by following these instructions:
+1. Create an IAM user:
+* Go to the [IAM console](https://console.aws.amazon.com/iam/).
+* Create a new user with programmatic access and attach the AdministratorAccess policy.
+2. Configure credentials:
+* Use the AWS CLI to configure your credentials. If you don't have the AWS CLI installed, follow these [installation instructions](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html).
+
+Run the following command to configure your credentials:
+```console
+aws configure
+```
+
+Enter your Access Key ID, Secret Access Key, and region. You can use the default value for the output format.
diff --git a/content/learning-paths/servers-and-cloud-computing/serverless-framework-aws-lambda-dynamodb/_index.md b/content/learning-paths/servers-and-cloud-computing/serverless-framework-aws-lambda-dynamodb/_index.md new file mode 100644 index 000000000..0a48c52d8 --- /dev/null +++ b/content/learning-paths/servers-and-cloud-computing/serverless-framework-aws-lambda-dynamodb/_index.md @@ -0,0 +1,42 @@
+---
+title: Deploy and integrate AWS Lambda with DynamoDB using the Serverless Framework
+
+minutes_to_complete: 30
+
+who_is_this_for: This learning path is for software developers interested in learning how to deploy serverless applications using the Serverless Framework and Amazon Web Services. It automates several manual deployment steps that developers typically need to perform when deploying microservice-based or IoT applications.
+
+learning_objectives:
+  - Create a multi-resource Serverless Framework solution.
+  - Automate deployment of an AWS Lambda function that consumes data from DynamoDB.
+
+prerequisites:
+  - A Windows on Arm computer such as [Windows Dev Kit 2023](https://learn.microsoft.com/en-us/windows/arm/dev-kit), a Lenovo Thinkpad X13s running Windows 11, or a Windows on Arm [virtual machine](/learning-paths/cross-platform/woa_azure/).
+ +author_primary: Dawid Borycki + +### Tags +skilllevels: Introductory +subjects: Cloud Computing +cloud_service_providers: Amazon Web Services + +armips: + - Neoverse + +tools_software_languages: + - Node.js + - Visual Studio Code + +operatingsystems: + - Linux + - Windows + - MacOS + + +### FIXED, DO NOT MODIFY +# ================================================================================ +weight: 1 # _index.md always has weight of 1 to order correctly +layout: "learningpathall" # All files under learning paths have this same wrapper +learning_path_main_page: "yes" # This should be surfaced when looking for related content. Only set for _index.md of learning path content. +--- diff --git a/content/learning-paths/servers-and-cloud-computing/serverless-framework-aws-lambda-dynamodb/_next-steps.md b/content/learning-paths/servers-and-cloud-computing/serverless-framework-aws-lambda-dynamodb/_next-steps.md new file mode 100644 index 000000000..af17ce960 --- /dev/null +++ b/content/learning-paths/servers-and-cloud-computing/serverless-framework-aws-lambda-dynamodb/_next-steps.md @@ -0,0 +1,40 @@ +--- +# ================================================================================ +# Edit +# ================================================================================ + +next_step_guidance: > + You can continue learning about migrating applications to Arm. +# 1-3 sentence recommendation outlining how the reader can generally keep learning about these topics, and a specific explanation of why the next step is being recommended. + +recommended_path: "/learning-paths/servers-and-cloud-computing/migration/" +# Link to the next learning path being recommended(For example this could be /learning-paths/servers-and-cloud-computing/mongodb). + + +# further_reading links to references related to this path. 
Can be:
+  # Manuals for a tool / software mentioned (type: documentation)
+  # Blog about related topics (type: blog)
+  # General online references (type: website)
+
+further_reading:
+  - resource:
+      title: Serverless Framework documentation
+      link: https://www.serverless.com/framework/docs
+      type: Documentation
+  - resource:
+      title: AWS Lambda
+      link: https://aws.amazon.com/lambda/
+      type: Documentation
+  - resource:
+      title: Amazon DynamoDB
+      link: https://aws.amazon.com/dynamodb/
+      type: Documentation
+
+
+# ================================================================================
+# FIXED, DO NOT MODIFY
+# ================================================================================
+weight: 21 # set to always be larger than the content in this path, and one more than 'review'
+title: "Next Steps" # Always the same
+layout: "learningpathall" # All files under learning paths have this same wrapper
+---
diff --git a/content/learning-paths/servers-and-cloud-computing/serverless-framework-aws-lambda-dynamodb/_review.md b/content/learning-paths/servers-and-cloud-computing/serverless-framework-aws-lambda-dynamodb/_review.md new file mode 100644 index 000000000..7e3c2e4eb --- /dev/null +++ b/content/learning-paths/servers-and-cloud-computing/serverless-framework-aws-lambda-dynamodb/_review.md @@ -0,0 +1,45 @@
+---
+review:
+  - questions:
+      question: >
+        How does the serverless.yml file facilitate the deployment and management of serverless applications and what are its key components?
+      answers:
+        - The serverless.yml file is used only for defining environment variables for AWS Lambda functions and it does not impact resource deployment.
+        - The serverless.yml file defines the service configuration, including functions, events, and resources, enabling the Serverless Framework to automate deployment and manage cloud infrastructure.
+        - The serverless.yml file serves as a configuration template for the AWS CloudFormation stack and is primarily used for setting user permissions for the serverless application.
+      correct_answer: 2
+      explanation: >
+        The serverless.yml file defines the services, functions, and resources of a deployment, and drives the automated deployment process.
+
+  - questions:
+      question: >
+        What steps are involved in using the serverless deploy command and how does it ensure that all resources and functions are correctly provisioned and configured in AWS?
+      answers:
+        - The serverless deploy command only packages the application code into a ZIP file and uploads it to an S3 bucket without provisioning any infrastructure.
+        - The serverless deploy command packages the application, generates an infrastructure template, uploads the artifacts, and executes the deployment, provisioning all defined resources and functions automatically.
+        - The serverless deploy command executes the application locally and prints logs to the console but does not interact with AWS services.
+
+      correct_answer: 2
+      explanation: >
+        The Serverless Framework packages code, uploads it, and provisions infrastructure automatically.
+
+  - questions:
+      question: >
+        How can you configure an AWS Lambda function to interact with a DynamoDB table and what are the benefits of using ES Modules and IAM roles in this context?
+ answers: + - AWS Lambda functions cannot directly interact with DynamoDB tables; they must use an intermediate service like S3 to access data. + - You configure an AWS Lambda function to interact with a DynamoDB table by using the AWS SDK to perform read/write operations and defining IAM roles to manage permissions, while ES Modules provide a modern syntax for importing dependencies and organizing code. + - AWS Lambda functions require a direct connection string to the DynamoDB table and ES Modules are not supported in AWS environments, making CommonJS the only viable module system. + + correct_answer: 2 + explanation: > + AWS SDK is used to perform read/write operations and defining IAM roles to manage permissions, while ES Modules provide a modern syntax for importing dependencies and organizing code. + + +# ================================================================================ +# FIXED, DO NOT MODIFY +# ================================================================================ +title: "Review" # Always the same title +weight: 20 # Set to always be larger than the content in this path +layout: "learningpathall" # All files under learning paths have this same wrapper +--- diff --git a/content/learning-paths/servers-and-cloud-computing/serverless-framework-aws-lambda-dynamodb/configuration.md b/content/learning-paths/servers-and-cloud-computing/serverless-framework-aws-lambda-dynamodb/configuration.md new file mode 100644 index 000000000..1f785f386 --- /dev/null +++ b/content/learning-paths/servers-and-cloud-computing/serverless-framework-aws-lambda-dynamodb/configuration.md @@ -0,0 +1,350 @@ +--- +title: Service declaration +weight: 3 + +### FIXED, DO NOT MODIFY +layout: learningpathall +--- + +### Service Declaration +In this section, you will declare the serverless service composed of the following AWS resources: + 1. DynamoDB Table: This will store hypothetical sensor data, including timestamps and randomly generated temperatures. + 2. Two AWS Lambda Functions: The first function will write temperatures to the DynamoDB table, and the second will retrieve the average temperature value. + 3. IAM Role: A set of permissions that enable the AWS Lambda functions to write and read data from the DynamoDB table. + +### Declare a service +To create a new serverless service, open the command prompt or terminal and type the following: + +```console +serverless +``` + +In the wizard that appears, proceed as follows: +1. Select the **AWS / Node.js / Simple Function** template. +2. In the *Name Your Project field*, type **AwsServerlessDynamoDbLambda**. +3. In the *Please login/register* or enter your license key section, select **Login/Register** and sign in to the Serverless Framework. +4. In the *Create Or Select An Existing App section*, select **Skip Adding An App**. + +The tool will generate the project composed of the following files: +1. `serverless.yml` - this contains the declaration of the infrastructure and services for a serverless application. +2. `handler.js` - you use this file to implement the core functionality of your serverless application, handling business logic and interactions with other services. Here, you will use this file to implement Lambda functions. 
+ +### serverless.yml +To define the AWS resources, open `serverless.yml` and modify it as follows: +```YAML +org: + +service: AwsServerlessDynamoDbLambda + +provider: + name: aws + runtime: nodejs20.x + region: us-east-1 + stage: dev + environment: + DYNAMODB_TABLE: SensorReadings + iam: + role: + statements: + - Effect: Allow + Action: + - dynamodb:BatchWriteItem + - dynamodb:PutItem + - dynamodb:UpdateItem + - dynamodb:GetItem + - dynamodb:Scan + - dynamodb:Query + Resource: + - arn:aws:dynamodb:${self:provider.region}:*:table/${self:provider.environment.DYNAMODB_TABLE} + +functions: + writeTemperatures: + handler: handler.writeTemperatures + events: + - http: + path: write-temperatures + method: post + + getAverageTemperature: + handler: handler.getAverageTemperature + events: + - http: + path: get-average-temperature + method: get + +resources: + Resources: + SensorReadingsTable: + Type: AWS::DynamoDB::Table + Properties: + TableName: ${self:provider.environment.DYNAMODB_TABLE} + AttributeDefinitions: + - AttributeName: id + AttributeType: S + - AttributeName: timestamp + AttributeType: N + KeySchema: + - AttributeName: id + KeyType: HASH + - AttributeName: timestamp + KeyType: RANGE + ProvisionedThroughput: + ReadCapacityUnits: 1 + WriteCapacityUnits: 1 + Outputs: + WriteTemperaturesEndpoint: + Description: "Endpoint for the writeTemperatures function" + Value: + Fn::Join: + - "" + - - "https://" + - Ref: "ApiGatewayRestApi" + - ".execute-api.${self:provider.region}.amazonaws.com/${self:provider.stage}/write-temperatures" + GetAverageTemperatureEndpoint: + Description: "Endpoint for the getAverageTemperature function" + Value: + Fn::Join: + - "" + - - "https://" + - Ref: "ApiGatewayRestApi" + - ".execute-api.${self:provider.region}.amazonaws.com/${self:provider.stage}/get-average-temperature" +``` + +The first section of the above file includes the following: +* org: - this specifies the organization name in the Serverless Framework’s dashboard. +* service: AwsServerlessDynamoDbLambda - defines the name of the service. This name is used to organize and identify the resources created by this Serverless service. + +In the Serverless Framework a service is the fundamental unit of organization. It represents a single project or application and encapsulates all the functions, resources, and configurations necessary to deploy and manage that project in a serverless environment. A service can consist of multiple functions, each with its own triggers and configuration, and can define the necessary cloud resources such as databases, storage, and other infrastructure components. + +After the service definition, there is the provider section which specifies the cloud provider (e.g., AWS) and general settings such as runtime, region, and environment variables. Here, the provider section contains the following: +* name: aws - this specifies that the provider is AWS. +* runtime: nodejs20.x - sets the runtime environment for the Lambda functions to Node.js 20.x. +* region: us-east-1 - defines the AWS region where the service will be deployed. +* stage: dev - sets the deployment stage to dev. + +Next, you have the environment section which includes one item: +* DYNAMODB_TABLE: SensorReadings. This defines an environment variable DYNAMODB_TABLE with the value SensorReadings which will be used to name the DynamoDB table. + +In the IAM section you define one role. 
This role specifies a list of actions that are allowed (dynamodb:BatchWriteItem, dynamodb:PutItem, dynamodb:UpdateItem, dynamodb:GetItem, dynamodb:Scan, dynamodb:Query) on a given resource. The resource is specified using an Amazon Resource Name (ARN).
+
+An ARN is a unique identifier for a resource in AWS. ARNs are used throughout AWS to uniquely identify resources such as EC2 instances, S3 buckets, DynamoDB tables, Lambda functions, IAM roles, and more.
+
+Here, you use an ARN to identify the DynamoDB table that the role actions are allowed on, using the ${self:provider.region} and ${self:provider.environment.DYNAMODB_TABLE} variables to dynamically insert the region and table name.
+
+The `serverless.yml` defines two AWS Lambda functions:
+1. writeTemperatures. Its handler is set to handler.writeTemperatures. This function will be triggered through an HTTP POST event.
+2. getAverageTemperature with the handler.getAverageTemperature handler. This function will be triggered through an HTTP GET event.
+
+In the resources section you define a DynamoDB table resource with the following attributes: id (a string) and timestamp (a number). Additionally, you set the read and write capacity units to 1 each (provisioned throughput).
+
+Finally, the outputs section is used to display the endpoints of both Lambda functions. You will use those endpoints to trigger the AWS Lambda functions.
+
+### handler.js
+You will now implement the two AWS Lambda functions. Open `handler.js` and replace its contents with the following code:
+
+```JavaScript
+import { DynamoDBClient } from '@aws-sdk/client-dynamodb';
+import { DynamoDBDocumentClient, BatchWriteCommand, ScanCommand } from '@aws-sdk/lib-dynamodb';
+
+const client = new DynamoDBClient({ region: "us-east-1" });
+const dynamoDb = DynamoDBDocumentClient.from(client);
+const tableName = process.env.DYNAMODB_TABLE;
+
+// Function to write random temperature records
+export const writeTemperatures = async (event) => {
+  const records = [];
+  const N = 20;
+  for (let i = 0; i < N; i++) {
+    const record = {
+      id: `temp-${Date.now()}-${i}`,
+      timestamp: Date.now(),
+      temperature: (Math.random() * 30) + 20, // Random fractional temperature between 20 and 50
+    };
+    records.push({
+      PutRequest: {
+        Item: record,
+      },
+    });
+  }
+
+  const params = {
+    RequestItems: {
+      [tableName]: records,
+    },
+  };
+
+  try {
+    await dynamoDb.send(new BatchWriteCommand(params));
+    return {
+      statusCode: 200,
+      body: JSON.stringify({
+        message: 'Temperature records written successfully!',
+      }),
+    };
+  } catch (error) {
+    return {
+      statusCode: 500,
+      body: JSON.stringify({
+        message: 'Failed to write temperature records',
+        error: error.message,
+      }),
+    };
+  }
+};
+
+// Function to retrieve last N temperature records, average them, and return the result
+export const getAverageTemperature = async (event) => {
+  const N = 10;
+
+  const params = {
+    TableName: tableName,
+    Limit: N,
+    ScanIndexForward: false
+  };
+
+  try {
+    const data = await dynamoDb.send(new ScanCommand(params));
+    const temperatures = data.Items.map(item => item.temperature);
+    const averageTemperature = temperatures.reduce((sum, value) => sum + value, 0) / temperatures.length;
+
+    return {
+      statusCode: 200,
+      body: JSON.stringify({
+        averageTemperature,
+      }),
+    };
+  } catch (error) {
+    return {
+      statusCode: 500,
+      body: JSON.stringify({
+        message: 'Failed to retrieve temperature records',
+        error: error.message,
+      }),
+    };
+  }
+};
+```
+
+The code defines the two AWS Lambda functions that interact with a DynamoDB
table: + 1. writeTemperatures - writes a batch of random temperature records to the DynamoDB table. + 2. getAverageTemperature - retrieves the last N temperature records from the table, calculates the average, and returns it. + +The first section of the code (see below): +```JavaScript +import { DynamoDBClient } from '@aws-sdk/client-dynamodb'; +import { DynamoDBDocumentClient, BatchWriteCommand, ScanCommand } from '@aws-sdk/lib-dynamodb'; +``` +imports the following components: +* DynamoDBClient and DynamoDBDocumentClient: these are used to interact with DynamoDB. The DynamoDBClient is the base client for AWS SDK operations, while DynamoDBDocumentClient provides a higher-level abstraction for working with documents in DynamoDB. +* BatchWriteCommand and ScanCommand: these are commands used to perform batch writes and scans on DynamoDB tables. + +After this, you have the following statements: +```JavaScript +const client = new DynamoDBClient({ region: "us-east-1" }); +const dynamoDb = DynamoDBDocumentClient.from(client); +const tableName = process.env.DYNAMODB_TABLE; +``` + +These statements initialize a new DynamoDBClient targeting the us-east-1 AWS region, create a DynamoDBDocumentClient from the base DynamoDBClient to work with DynamoDB documents. The final statement fetches the table name from the environment variable DYNAMODB_TABLE. This variable is set automatically by the Serverless Framework when you deploy the resources (it comes from the serverless.yml) + +Next, there is a definition of the `writeTemperatures` function: +```JavaScript +export const writeTemperatures = async (event) => { + const records = []; + const N = 20; + for (let i = 0; i < N; i++) { + const record = { + id: `temp-${Date.now()}-${i}`, + timestamp: Date.now(), + temperature: (Math.random() * 30) + 20, // Random fractional temperature between 20 and 50 + }; + records.push({ + PutRequest: { + Item: record, + }, + }); + } + + const params = { + RequestItems: { + [tableName]: records, + }, + }; + + try { + await dynamoDb.send(new BatchWriteCommand(params)); + return { + statusCode: 200, + body: JSON.stringify({ + message: 'Temperature records written successfully!', + }), + }; + } catch (error) { + return { + statusCode: 500, + body: JSON.stringify({ + message: 'Failed to write temperature records', + error: error.message, + }), + }; + } +}; +``` + +The purpose of the above function is to write 20 random temperature records (in a range of 20-50 degree Celsius) to the DynamoDB table. To do this, the function uses a loop to create 20 records with the following: +* id - a unique identifier combining the current timestamp and loop index. +* timestamp - the current time in milliseconds. +* temperature - a random fractional value between 20 and 50. + +Then, the function uses `BatchWriteCommand` to send the records to the table. Also it catches and returns errors with an appropriate HTTP status code. 
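+
+One caveat worth noting: DynamoDB's BatchWriteItem API accepts at most 25 put or delete requests per call, so the batch above only works because `N` is 20. If you raise `N` past 25, the records have to be written in chunks. A minimal sketch of how that could look is shown below; the `writeInBatches` helper is hypothetical (it is not part of this Learning Path's code) and reuses the same client setup as `handler.js`:
+
+```JavaScript
+import { DynamoDBClient } from '@aws-sdk/client-dynamodb';
+import { DynamoDBDocumentClient, BatchWriteCommand } from '@aws-sdk/lib-dynamodb';
+
+const client = new DynamoDBClient({ region: 'us-east-1' });
+const dynamoDb = DynamoDBDocumentClient.from(client);
+const tableName = process.env.DYNAMODB_TABLE;
+
+// Hypothetical helper: write any number of PutRequest entries in chunks of 25,
+// the per-call limit of DynamoDB's BatchWriteItem API.
+export const writeInBatches = async (records) => {
+  for (let i = 0; i < records.length; i += 25) {
+    const chunk = records.slice(i, i + 25);
+    await dynamoDb.send(new BatchWriteCommand({
+      RequestItems: { [tableName]: chunk },
+    }));
+  }
+};
+```
+
+A production version would also check `UnprocessedItems` in each response and retry, because BatchWriteItem can partially succeed when the table is throttled.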
+
+The second function, `getAverageTemperature`, is defined as follows:
+```JavaScript
+export const getAverageTemperature = async (event) => {
+  const N = 10;
+
+  const params = {
+    TableName: tableName,
+    Limit: N,
+    ScanIndexForward: false
+  };
+
+  try {
+    const data = await dynamoDb.send(new ScanCommand(params));
+    const temperatures = data.Items.map(item => item.temperature);
+    const averageTemperature = temperatures.reduce((sum, value) => sum + value, 0) / temperatures.length;
+
+    return {
+      statusCode: 200,
+      body: JSON.stringify({
+        averageTemperature,
+      }),
+    };
+  } catch (error) {
+    return {
+      statusCode: 500,
+      body: JSON.stringify({
+        message: 'Failed to retrieve temperature records',
+        error: error.message,
+      }),
+    };
+  }
+};
+```
+The `getAverageTemperature` function retrieves the last 10 temperature records and calculates their average. To do this, the function uses the `ScanCommand` to fetch items from the table, limiting it to 10 records. The retrieved records are mapped to extract the temperatures and then the function calculates the average using JavaScript's `reduce` function. The `getAverageTemperature` function returns the average temperature in the response body. Note that `ScanIndexForward` is a parameter of the Query operation; a Scan returns items in no particular order, so in practice this code averages 10 arbitrary records rather than strictly the most recent ones.
+
+The above code demonstrates a common pattern in serverless applications where functions interact with AWS services like DynamoDB to store and retrieve data.
+
+### package.json
+To make the code function properly, you need to add a `package.json` file (save it next to `handler.js`) with the following contents:
+```JSON
+{
+  "type": "module"
+}
+```
+
+The "type": "module" field in the package.json file is necessary when using ES Modules in a Node.js application. It is needed for AWS Lambda for two reasons:
+1. To enable ES Module syntax. By default, Node.js treats files as CommonJS modules. The "type": "module" declaration in package.json tells Node.js to interpret .js files as ES Modules. This allows you to use modern JavaScript features such as import and export statements which are part of the ES Module specification.
+2. To enable compatibility with AWS Lambda. AWS Lambda supports Node.js runtimes that can interpret both CommonJS and ES Modules. However, to use ES Module syntax directly, you need to explicitly set the module type in your package.json. Without "type": "module", Node.js will throw errors if you try to use import and export syntax, as it defaults to CommonJS which uses require and module.exports.
+
+You are now ready to deploy the serverless application.
diff --git a/content/learning-paths/servers-and-cloud-computing/serverless-framework-aws-lambda-dynamodb/deployment.md b/content/learning-paths/servers-and-cloud-computing/serverless-framework-aws-lambda-dynamodb/deployment.md new file mode 100644 index 000000000..7ac00f3d3 --- /dev/null +++ b/content/learning-paths/servers-and-cloud-computing/serverless-framework-aws-lambda-dynamodb/deployment.md @@ -0,0 +1,63 @@
+---
+title: Deployment
+weight: 4
+
+### FIXED, DO NOT MODIFY
+layout: learningpathall
+---
+
+### Objective
+In this section you will deploy the resources you declared earlier.
+
+### Deployment
+To deploy the resources, open the terminal or command prompt and navigate to the AwsServerlessDynamoDbLambda folder. Then invoke the following command:
+
+```console
+serverless deploy
+```
+
+When you invoke the serverless deploy command, the Serverless Framework orchestrates a series of steps to deploy your serverless application to the specified cloud provider (e.g., AWS). Here's a brief description of what happens during this process.
The Serverless Framework validates your `serverless.yml` configuration file to ensure there are no syntax errors or missing information. Then, it compiles the service definition and resolves any variable references (e.g., environment variables or stage variables).
+
+In the next step, the framework packages your application code, including all necessary dependencies, into a deployable artifact (e.g., a ZIP file). It respects any packaging configuration specified in the `serverless.yml`, such as including or excluding specific files or directories.
+
+For AWS, the framework generates a CloudFormation template based on your service configuration, which describes the infrastructure and resources required (e.g., Lambda functions, API Gateway, DynamoDB tables). This template includes all resource definitions, IAM roles, permissions, and configuration settings.
+
+The packaged artifact (code and configuration) is uploaded to the cloud provider's storage (e.g., S3 for AWS). The framework handles authentication and authorization using the credentials and roles configured for the deployment.
+
+The Serverless Framework initiates the deployment process by executing the infrastructure-as-code (IaC) template. For AWS, this involves creating or updating a CloudFormation stack, which provisions and configures all defined resources.
+
+The cloud provider provisions the specified resources, such as Lambda functions, API Gateway endpoints, DynamoDB tables, S3 buckets, and any other services required by the application. IAM roles and permissions are configured to ensure that functions have the necessary access to interact with other resources.
+
+Once deployment is successful, the framework outputs relevant information such as API endpoints, function ARNs, and any other specified outputs defined in your `serverless.yml`. In this case, you will see the following:
+
+```output
+endpoints:
+  POST - https://7j1vtlqff2.execute-api.us-east-1.amazonaws.com/dev/write-temperatures
+  GET - https://7j1vtlqff2.execute-api.us-east-1.amazonaws.com/dev/get-average-temperature
+functions:
+  writeTemperatures: AwsServerlessDynamoDbLambda-dev-writeTemperatures (2.3 kB)
+  getAverageTemperature: AwsServerlessDynamoDbLambda-dev-getAverageTemperature (2.3 kB)
+```
+
+You can use the first URL to invoke the AWS Lambda function that writes temperatures to the DynamoDB table:
+```console
+curl -X POST https://7j1vtlqff2.execute-api.us-east-1.amazonaws.com/dev/write-temperatures
+```
+
+Then, you can invoke the second function, which retrieves the records and averages them:
+```console
+curl https://7j1vtlqff2.execute-api.us-east-1.amazonaws.com/dev/get-average-temperature
+```
+
+The output of the above commands should be similar to the figure shown below:
+
+![fig1](figures/01.png)
+
+To clean up the resources you created, you can use a single command:
+
+```console
+serverless remove
+```
+
+### Summary
+In this Learning Path, you built a complete serverless application using the Serverless Framework, configured AWS resources, deployed Lambda functions, and ensured proper management and accessibility of the services. This workflow demonstrates how serverless architecture can simplify the deployment and scaling of cloud applications, allowing developers to focus on business logic rather than infrastructure management.
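+
+As a final tip, besides curl you can also call the deployed functions directly through the Serverless CLI, which is a quick sanity check; the function name here matches the entry defined in `serverless.yml`:
+
+```console
+serverless invoke --function getAverageTemperature
+```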
diff --git a/content/learning-paths/servers-and-cloud-computing/serverless-framework-aws-lambda-dynamodb/figures/01.png b/content/learning-paths/servers-and-cloud-computing/serverless-framework-aws-lambda-dynamodb/figures/01.png new file mode 100644 index 000000000..d9d8d22c2 Binary files /dev/null and b/content/learning-paths/servers-and-cloud-computing/serverless-framework-aws-lambda-dynamodb/figures/01.png differ diff --git a/content/learning-paths/servers-and-cloud-computing/serverless-framework-aws-lambda-dynamodb/objective.md b/content/learning-paths/servers-and-cloud-computing/serverless-framework-aws-lambda-dynamodb/objective.md new file mode 100644 index 000000000..743e21cca --- /dev/null +++ b/content/learning-paths/servers-and-cloud-computing/serverless-framework-aws-lambda-dynamodb/objective.md @@ -0,0 +1,19 @@ +--- +title: Objective +weight: 2 + +### FIXED, DO NOT MODIFY +layout: learningpathall +--- +### What is the Serverless Framework? + +The Serverless Framework is an open-source toolkit that enables developers to build and deploy applications on cloud infrastructure without managing server operations. By abstracting away the underlying server management, it allows for greater focus on writing code and developing features, enhancing productivity and efficiency. The framework supports multiple cloud providers such as AWS, Google Cloud, and Microsoft Azure, providing a versatile and scalable solution for modern application development. With its powerful plug-ins and community support, the Serverless Framework simplifies complex deployment processes, promotes best practices, and facilitates rapid iteration, making it an essential tool in the DevOps landscape. + +In the previous [Learning Path](/learning-paths/servers-and-cloud-computing/serverless-framework-aws-intro/), you learned how to set up the Serverless Framework for AWS and deploy a simple AWS Lambda function. For more tutorials about running IoT applications by manually creating various AWS resources, please review the following learning paths: +1. [Use Amazon DynamoDB for your IoT applications running on Arm64](/learning-paths/laptops-and-desktops/win_aws_iot_dynamodb). +2. [Use AWS Lambda for IoT applications running on Arm64](/learning-paths/laptops-and-desktops/win_aws_iot_lambda). +3. [Integrate AWS Lambda with DynamoDB for IoT applications running on Windows on Arm](/learning-paths/laptops-and-desktops/win_aws_iot_lambda_dynamodb). + +While manual resource provisioning has its benefits, it can become increasingly problematic as you start deploying your applications to the cloud. As your solutions grow and become more complex, the challenges associated with manual provisioning escalate. This is where the Serverless Framework comes into play, offering a streamlined and efficient way to manage your cloud resources. + +In this learning path, you will learn how to automatically deploy a multi-resource serverless solution to AWS. By leveraging the Serverless Framework, you can simplify the deployment process, enhance scalability, and reduce the operational overhead associated with managing cloud infrastructure manually. Specifically, you will create a solution composed of the DynamoDB table and the AWS Lambda function. The latter will consume the data from the table. The function will calculate the average of numerical values in the selected column. This is similar to what you learned in this [Learning Path](/learning-paths/laptops-and-desktops/win_aws_iot_lambda_dynamodb). 
diff --git a/content/learning-paths/smartphones-and-mobile/Build-Llama3-Chat-Android-App-Using-Executorch-And-XNNPACK/4-Prepare-LLaMA-models.md b/content/learning-paths/smartphones-and-mobile/Build-Llama3-Chat-Android-App-Using-Executorch-And-XNNPACK/4-Prepare-LLaMA-models.md
index de36c0f03..7cbe40910 100755
--- a/content/learning-paths/smartphones-and-mobile/Build-Llama3-Chat-Android-App-Using-Executorch-And-XNNPACK/4-Prepare-LLaMA-models.md
+++ b/content/learning-paths/smartphones-and-mobile/Build-Llama3-Chat-Android-App-Using-Executorch-And-XNNPACK/4-Prepare-LLaMA-models.md
@@ -52,50 +52,17 @@ consolidated.00.pth params.json tokenizer.model

 Export the model and generate a `.pte` file. Run the Python command to export the model:

 ```bash
-python -m examples.models.llama2.export_llama --checkpoint <consolidated.00.pth> -p <params.json> -kv --use_sdpa_with_kv_cache -X -qmode 8da4w --group_size 128 -d fp32 --metadata '{"get_bos_id":128000, "get_eos_id":128001}' --embedding-quantize 4,32 --output_name="llama3_kv_sdpa_xnn_qe_4_32.pte"
+python -m examples.models.llama2.export_llama --checkpoint llama-models/models/llama3_1/Meta-Llama-3.1-8B/consolidated.00.pth -p llama-models/models/llama3_1/Meta-Llama-3.1-8B/params.json -kv --use_sdpa_with_kv_cache -X -qmode 8da4w --group_size 128 -d fp32 --metadata '{"get_bos_id":128000, "get_eos_id":128001}' --embedding-quantize 4,32 --output_name="llama3_kv_sdpa_xnn_qe_4_32.pte"
 ```

-Where `<consolidated.00.pth>` and `<params.json>` are the paths to the downloaded model files, found in llama3/Meta-Llama-3.1-8B by default.
-
 Due to the larger vocabulary size of Llama 3, you should quantize the embeddings with `--embedding-quantize 4,32` to further reduce the model size.

-### Download and export stories110M model
-
-Follow the steps in this section, if you want to deploy and run a smaller model for educational purposes instead of the full Llama 3 8B model.
-
-From the `executorch` root directory follow these steps:
-
-1. Download `stories110M.pt` and `tokenizer.model` from Github.
-
-    ``` bash
-    wget "https://huggingface.co/karpathy/tinyllamas/resolve/main/stories110M.pt"
-    wget "https://raw.githubusercontent.com/karpathy/llama2.c/master/tokenizer.model"
-    ```
-
-2. Create params file.
-
-    ``` bash
-    echo '{"dim": 768, "multiple_of": 32, "n_heads": 12, "n_layers": 12, "norm_eps": 1e-05, "vocab_size": 32000}' > params.json
-    ```
-
-3. Export model and generate `.pte` file.
-
-    ``` bash
-    python -m examples.models.llama2.export_llama -c stories110M.pt -p params.json -X
-    ```
-
-4. Create tokenizer.bin.
-
-    ``` bash
-    python -m examples.models.llama2.tokenizer.tokenizer -t tokenizer.model -o tokenizer.bin
-    ```
-
 ## Optional: Evaluate Llama 3 model accuracy

 You can evaluate model accuracy using the same arguments as above:

 ``` bash
-python -m examples.models.llama2.eval_llama -c <consolidated.00.pth> -p <params.json> -t <tokenizer.model> -d fp32 --max_seq_len 2048 --limit 1000
+python -m examples.models.llama2.eval_llama -c llama-models/models/llama3_1/Meta-Llama-3.1-8B/consolidated.00.pth -p llama-models/models/llama3_1/Meta-Llama-3.1-8B/params.json -t llama-models/models/llama3_1/Meta-Llama-3.1-8B/tokenizer.model -d fp32 --max_seq_len 2048 --limit 1000
 ```

 {{% notice Warning %}}
@@ -134,16 +101,6 @@ Follow the steps below to build ExecuTorch and the Llama runner to run models.
 For Llama 3, add the `-DEXECUTORCH_USE_TIKTOKEN=ON` option.
 {{% /notice %}}

-{{% notice Note %}}
-If you are building on a Mac, there is currently an [open bug](https://github.com/pytorch/executorch/issues/3600) that adds a `--gc-sections` flag to ld options. You need to remove this flag for Mac by opening `examples/models/llama2/CMakeLists.txt` and removing these lines:
-
-```
-if(CMAKE_BUILD_TYPE STREQUAL "Release")
-  target_link_options(llama_main PRIVATE "LINKER:--gc-sections,-s")
-endif()
-```
-{{% /notice %}}
-
 Run cmake:

 ``` bash
@@ -163,9 +120,7 @@ Run cmake:

 3. Run the model:

    ``` bash
-   cmake-out/examples/models/llama2/llama_main --model_path=<model pte file> --tokenizer_path=<tokenizer.model> --prompt=<prompt>
+   cmake-out/examples/models/llama2/llama_main --model_path=llama3_kv_sdpa_xnn_qe_4_32.pte --tokenizer_path=llama-models/models/llama3_1/Meta-Llama-3.1-8B/tokenizer.model --prompt=<prompt>
    ```

 The run options are available on [GitHub](https://github.com/pytorch/executorch/blob/main/examples/models/llama2/main.cpp#L18-L40).
-
-For Llama 3, you can pass the original `tokenizer.model` (without converting to `.bin` file).
diff --git a/content/learning-paths/smartphones-and-mobile/android_neon/hello_neon.md b/content/learning-paths/smartphones-and-mobile/android_neon/hello_neon.md
index e72e7e4b2..9012eccb9 100644
--- a/content/learning-paths/smartphones-and-mobile/android_neon/hello_neon.md
+++ b/content/learning-paths/smartphones-and-mobile/android_neon/hello_neon.md
@@ -9,13 +9,13 @@ layout: "learningpathall"

 ## Before you begin

-You will need a x86_64 or Apple M1 computer with [Android Studio](https://developer.android.com/studio) installed. Currently, Android Studio is not supported on Windows and Linux machines with Arm based CPUs.
+You will need an x86_64 or Apple silicon computer with [Android Studio](https://developer.android.com/studio) installed. Currently, Android Studio is not supported on Windows and Linux machines with Arm-based CPUs.

 You will also need an Armv8-powered smartphone running Android to run your application on. We tested the application in this Learning Path on a Google Pixel 7 smartphone.

 ## Overview

-Do not repeat yourself (DRY) is one of the major principles of software development. Following this principle means reusing your code using functions. However, invoking a function adds extra overhead. In certain cases compilers can reduce this overhead by taking advantage of built-in functions called intrinsics. The compiler replaces the intrinsics that are used in high-level programming languages, for example C/C++, with mostly 1:1 mapped assembly instructions.
+Don't repeat yourself (DRY) is one of the major principles of software development. Following this principle means reusing your code using functions. However, invoking a function adds extra overhead. In certain cases, compilers can reduce this overhead by taking advantage of built-in functions called intrinsics. The compiler replaces the intrinsics that are used in high-level programming languages, for example C/C++, with mostly 1:1 mapped assembly instructions.

 To further improve performance, you need to use assembly code. However, with Arm Neon intrinsics you can avoid the complication of writing assembly functions. Instead, you only need to program in C/C++ and call the intrinsics or instruction functions that are declared in the `arm_neon.h` header file. Neon intrinsics can improve the performance of your application.
@@ -31,7 +31,7 @@ In this section, you will setup your Android development environment to use Neon

 ![img1](neon1.png)

-3. Set the application name to `Neon Intrinsics`, select `Java` as the language, leave the Minimum SDK selection as `API 24: Android 7.0(Nougat)` and click `Next`, as shown below:
+3. Set the application name to `Neon Intrinsics`, select `Java` as the language, and leave the Minimum SDK selection as `API 24: Android 7.0(Nougat)`. Make sure you choose `Groovy DSL` as the Build configuration language, which may not be the default option. Then click `Next`.

 ![img2](neon2.png)

@@ -39,14 +39,14 @@ In this section, you will setup your Android development environment to use Neon

 ![img3](neon3.png)

-5. Install `CMake` and `Android NDK`. Click on `File->Settings`. On the left pane, under `System Settings` select `Android SDK`. From the tabs on the right, click on `SDK Tools`. Select the `CMake` and `NDK(Side by side)` packages and click on `Apply` followed by `OK`, as shown below:
+5. Install `CMake` and `Android NDK`. Click on `Android Studio` or `File` and choose `Settings`. On the left pane, under `System Settings`, select `Android SDK`. From the tabs on the right, click on `SDK Tools`. Select the `CMake` and `NDK(Side by side)` packages and click on `Apply` followed by `OK`, as shown below:

 ![img4](neon4.png)

 6. The project that you created comprises one activity that is implemented within the `MainActivity` class. It prints a "Hello from C++" string in the app which comes from the `native-lib` library. Open `app/cpp/native-lib.cpp` as shown below to view the code for this library:

 ![img5](neon5.png)
- 
+
 7. The Android NDK supports different Application Binary Interfaces (ABI). Set the NDK for this project to use the `arm64-v8a` ABI. This ABI is for Armv8-A based CPUs, which support the 64-bit Arm architecture. On the left pane, expand `Gradle Scripts` and open the `build.gradle(Module :app)` file. Add the line below to the `defaultConfig` section:

 ```console
@@ -59,7 +59,7 @@ The code in this section should look like:
 defaultConfig {
     applicationId "com.example.neonintrinsics"
     minSdk 24
-    targetSdk 33
+    targetSdk 34
     versionCode 1
     versionName "1.0"
     ndk.abiFilters 'arm64-v8a'
@@ -71,27 +71,28 @@

 8. Enable support for Neon intrinsics by passing the following CMake argument in `build.gradle(Module :app)`

 ```console
-arguments "-DANDROID_ARM_NEON=ON" 
+arguments "-DANDROID_ARM_NEON=ON"
 ```

 This argument is added under `externalNativeBuild` in the `defaultConfig` section of the file. The complete `build.gradle(Module :app)` file is shown below:

 ```console
 plugins {
-    id 'com.android.application'
+    alias(libs.plugins.android.application)
 }

 android {
     namespace 'com.example.neonintrinsics'
-    compileSdk 33
+    compileSdk 34

     defaultConfig {
         applicationId "com.example.neonintrinsics"
         minSdk 24
-        targetSdk 33
+        targetSdk 34
         versionCode 1
         versionName "1.0"
         ndk.abiFilters 'arm64-v8a'
+        testInstrumentationRunner "androidx.test.runner.AndroidJUnitRunner"

         externalNativeBuild {
             cmake{
@@ -124,18 +125,18 @@ android {

 dependencies {

-    implementation 'androidx.appcompat:appcompat:1.6.1'
-    implementation 'com.google.android.material:material:1.9.0'
-    implementation 'androidx.constraintlayout:constraintlayout:2.1.4'
-    testImplementation 'junit:junit:4.13.2'
-    androidTestImplementation 'androidx.test.ext:junit:1.1.5'
-    androidTestImplementation 'androidx.test.espresso:espresso-core:3.5.1'
+    implementation libs.appcompat
+    implementation libs.material
+    implementation libs.constraintlayout
+    testImplementation libs.junit
+    androidTestImplementation libs.ext.junit
+    androidTestImplementation libs.espresso.core
 }
 ```

 With these changes you can use Neon intrinsics, which are declared within the `arm_neon.h` header.

-9. Build the application to make sure your Android app development setup is working properly.
+9. Build the application to make sure your Android app development setup is working properly. Select `Build->Make Project`.
 When the build completes, you should see messages in the Build console similar to those shown below:

 ```output
@@ -145,7 +146,7 @@
 BUILD SUCCESSFUL in 2m 44s
 Build Analyzer results available
 ```

-10. Run this application on an 64-bit Arm powered smartphone running Android. We ran the app on a Google Pixel 7 phone using a USB cable connected to the development machine running Android Studio. You can also pair your phone over Wi-Fi.
+10. Run this application on a 64-bit Arm-powered smartphone running Android. We ran the app on a Google Pixel 7 phone using a USB cable connected to the development machine running Android Studio. You can also pair your phone over Wi-Fi. Connect your device and select `Tools->Device Manager`. Select the `Physical` tab and your connected phone should show up on the list of devices as shown in the image below:

 ![img6](neon6.png)
diff --git a/content/learning-paths/smartphones-and-mobile/android_neon/neon2.png b/content/learning-paths/smartphones-and-mobile/android_neon/neon2.png
index 204aacdb8..201b2b9bb 100644
Binary files a/content/learning-paths/smartphones-and-mobile/android_neon/neon2.png and b/content/learning-paths/smartphones-and-mobile/android_neon/neon2.png differ
diff --git a/content/learning-paths/smartphones-and-mobile/android_neon/neon4.png b/content/learning-paths/smartphones-and-mobile/android_neon/neon4.png
index 672b98d37..6eca366ca 100644
Binary files a/content/learning-paths/smartphones-and-mobile/android_neon/neon4.png and b/content/learning-paths/smartphones-and-mobile/android_neon/neon4.png differ
diff --git a/content/learning-paths/smartphones-and-mobile/kleidiai-on-android-with-mediapipe-and-xnnpack/1-x86-install-dependencies.md b/content/learning-paths/smartphones-and-mobile/kleidiai-on-android-with-mediapipe-and-xnnpack/1-x86-install-dependencies.md
index 863a03e5e..3a3df78db 100644
--- a/content/learning-paths/smartphones-and-mobile/kleidiai-on-android-with-mediapipe-and-xnnpack/1-x86-install-dependencies.md
+++ b/content/learning-paths/smartphones-and-mobile/kleidiai-on-android-with-mediapipe-and-xnnpack/1-x86-install-dependencies.md
@@ -36,7 +36,7 @@ RUN echo '%sudo ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers
 WORKDIR /home/$USER
 USER ubuntu

-RUN sudo apt install unzip python3-pip -y
+RUN sudo apt-get install unzip python3-pip -y
 RUN sudo apt-get install openjdk-11-jdk -y

 ENV JAVA_HOME "/usr/lib/jvm/java-11-openjdk-amd64"
@@ -50,9 +50,9 @@ RUN git clone --depth 1 https://github.com/google/mediapipe.git
 WORKDIR /home/$USER/mediapipe
 RUN pip3 install -r requirements.txt

-RUN bash setup_android_sdk_and_ndk.sh $HOME/Android/Sdk $HOME/Android/Sdk/ndk-bundle r21 --accept-licenses
+RUN bash setup_android_sdk_and_ndk.sh $HOME/Android/Sdk $HOME/Android/Sdk/ndk-bundle r26d --accept-licenses

-ENV PATH "$PATH:$HOME/Android/Sdk/ndk-bundle/android-ndk-r21/toolchains/llvm/prebuilt/linux-x86_64/bin"
+ENV PATH "$PATH:$HOME/Android/Sdk/ndk-bundle/android-ndk-r26d/toolchains/llvm/prebuilt/linux-x86_64/bin"
 ENV GLOG_logtostderr=1
 ```

@@ -60,7 +60,7 @@ ENV GLOG_logtostderr=1
 Build the Docker image:

 ```
-docker build -t ubuntu-x86 -f Dockerfile .
+docker build -t ubuntu-x86 -f Dockerfile . --platform=linux/amd64
 ```

 Run a shell on the Docker container:
diff --git a/content/learning-paths/smartphones-and-mobile/kleidiai-on-android-with-mediapipe-and-xnnpack/3-benchmark-gemma-i8mm.md b/content/learning-paths/smartphones-and-mobile/kleidiai-on-android-with-mediapipe-and-xnnpack/3-benchmark-gemma-i8mm.md
index b2185e204..a2cf409b3 100644
--- a/content/learning-paths/smartphones-and-mobile/kleidiai-on-android-with-mediapipe-and-xnnpack/3-benchmark-gemma-i8mm.md
+++ b/content/learning-paths/smartphones-and-mobile/kleidiai-on-android-with-mediapipe-and-xnnpack/3-benchmark-gemma-i8mm.md
@@ -65,7 +65,7 @@ docker ps
 And then replace `[container ID]` in this command with your running container ID:

 ```
-docker cp [container ID]:/home/ubuntu/mediapipe/bazel-bin/mediapipe/tasks/cc/genai/inference/c/llm_test .
+docker cp [container ID]:/home/ubuntu/mediapipe/bazel-bin/mediapipe/tasks/cc/genai/inference/utils/xnn_utils/llm_test .
 ```

 You can then run
diff --git a/content/learning-paths/smartphones-and-mobile/totalcompute/_index.md b/content/learning-paths/smartphones-and-mobile/totalcompute/_index.md
index e166ec988..fe9f17790 100644
--- a/content/learning-paths/smartphones-and-mobile/totalcompute/_index.md
+++ b/content/learning-paths/smartphones-and-mobile/totalcompute/_index.md
@@ -2,6 +2,9 @@
 title: Get started with Arm Total Compute

 draft: true
+cascade:
+  draft: true
+

 minutes_to_complete: 60
diff --git a/contributors.csv b/contributors.csv
index a251190de..196c7c6a0 100644
--- a/contributors.csv
+++ b/contributors.csv
@@ -35,3 +35,4 @@ Varun Chari,Arm,,,,
 Adnan AlSinan,Arm,,,,
 Graham Woodward,Arm,,,,
 Basma El Gaabouri,Arm,,,,
+Gayathri Narayana Yegna Narayanan,Arm,,,,
diff --git a/data/stats_weekly_data.yml b/data/stats_weekly_data.yml
index 1c1514996..ee6e7847c 100644
--- a/data/stats_weekly_data.yml
+++ b/data/stats_weekly_data.yml
@@ -3167,3 +3167,71 @@
   avg_close_time_hrs: 0
   num_issues: 12
   percent_closed_vs_total: 0.0
+- a_date: '2024-08-19'
+  content:
+    cross-platform: 21
+    embedded-systems: 18
+    install-guides: 84
+    laptops-and-desktops: 31
+    microcontrollers: 24
+    servers-and-cloud-computing: 77
+    smartphones-and-mobile: 22
+    total: 277
+  contributions:
+    external: 38
+    internal: 318
+  github_engagement:
+    num_forks: 30
+    num_prs: 10
+  individual_authors:
+    arm: 3
+    arnaud-de-grandmaison: 1
+    basma-el-gaabouri: 1
+    bolt-liu: 2
+    brenda-strech: 1
+    christopher-seidl: 7
+    daniel-gubay: 1
+    daniel-nguyen: 1
+    david-spickett: 2
+    dawid-borycki: 28
+    diego-russo: 1
+    diego-russo-and-leandro-nunes: 1
+    elham-harirpoush: 2
+    florent-lebeau: 5
+    "fr\xE9d\xE9ric--lefred--descamps": 2
+    gabriel-peterson: 5
+    graham-woodward: 1
+    james-whitaker,-arm: 1
+    jason-andrews: 83
+    johanna-skinnider: 2
+    jonathan-davies: 2
+    jose-emilio-munoz-lopez,-arm: 1
+    julie-gaskin: 4
+    julio-suarez: 5
+    kasper-mecklenburg: 1
+    konstantinos-margaritis: 7
+    kristof-beyls: 1
+    liliya-wu: 1
+    mathias-brossard: 1
+    michael-hall: 5
+    owen-wu,-arm: 2
+    pareena-verma: 35
+    pareena-verma,-jason-andrews,-and-zach-lasiuk: 1
+    pareena-verma,-joe-stech,-adnan-alsinan: 1
+    pranay-bakre: 2
+    przemyslaw-wirkus: 1
+    roberto-lopez-mendez: 2
+    ronan-synnott: 45
+    thirdai: 1
+    tom-pilar: 1
+    uma-ramalingam: 1
+    varun-chari: 1
+    visualsilicon: 1
+    ying-yu: 1
+    ying-yu,-arm: 1
+    zach-lasiuk: 1
+    zhengjun-xing: 2
+  issues:
+    avg_close_time_hrs: 0
+    num_issues: 12
+    percent_closed_vs_total: 0.0