diff --git a/.wordlist.txt b/.wordlist.txt index 12f6888c8..10571ce17 100644 --- a/.wordlist.txt +++ b/.wordlist.txt @@ -3380,4 +3380,44 @@ wiseeye wlcsp xB xmodem -yolov \ No newline at end of file +yolov +Dsouza +FGCT +GCT +GCs +GC’s +HNso +HeapRegionSize +HugePages +InitiatingHeapOccupancyPercent +JDKs +JVMs +LZMA +Lau +LuaJIT +NGFW +ParallelGCThreads +Preema +Roesch +Sourcefire +TPACKET +WebGPU’s +Whitepaper +YGCT +axion +callstack +et +gc +grubfile +jstat +mqF +netresec +parallelizing +profileable +profilers +ruleset +snortrules +techmahindra +unreferenced +uptime +wC \ No newline at end of file diff --git a/content/learning-paths/laptops-and-desktops/_index.md b/content/learning-paths/laptops-and-desktops/_index.md index 857b0f965..2053f4494 100644 --- a/content/learning-paths/laptops-and-desktops/_index.md +++ b/content/learning-paths/laptops-and-desktops/_index.md @@ -13,12 +13,12 @@ operatingsystems_filter: - ChromeOS: 1 - Linux: 29 - macOS: 7 -- Windows: 37 +- Windows: 38 subjects_filter: - CI-CD: 3 - Containers and Virtualization: 6 - Migration to Arm: 26 -- Performance and Architecture: 20 +- Performance and Architecture: 21 subtitle: Create and migrate apps for power efficient performance title: Laptops and Desktops tools_software_languages_filter: @@ -57,8 +57,8 @@ tools_software_languages_filter: - Neovim: 1 - Node.js: 3 - OpenCV: 1 -- perf: 2 -- Python: 2 +- perf: 3 +- Python: 3 - Qt: 2 - Remote.It: 1 - RME: 1 @@ -73,7 +73,7 @@ tools_software_languages_filter: - Windows Performance Analyzer: 1 - Windows Presentation Foundation: 1 - Windows Sandbox: 1 -- WindowsPerf: 3 +- WindowsPerf: 4 - WinUI 3: 1 - WSL: 1 - Xamarin Forms: 1 diff --git a/content/learning-paths/servers-and-cloud-computing/_index.md b/content/learning-paths/servers-and-cloud-computing/_index.md index 867377bee..bece087d1 100644 --- a/content/learning-paths/servers-and-cloud-computing/_index.md +++ b/content/learning-paths/servers-and-cloud-computing/_index.md @@ -9,9 +9,9 @@ maintopic: true operatingsystems_filter: - Android: 2 - Baremetal: 1 -- Linux: 109 +- Linux: 111 - macOS: 9 -- Windows: 12 +- Windows: 13 pinned_modules: - module: name: Recommended getting started learning paths @@ -22,9 +22,9 @@ subjects_filter: - CI-CD: 4 - Containers and Virtualization: 25 - Databases: 15 -- Libraries: 6 +- Libraries: 7 - ML: 14 -- Performance and Architecture: 38 +- Performance and Architecture: 40 - Storage: 1 - Web: 10 subtitle: Optimize cloud native apps on Arm for performance and cost @@ -44,9 +44,10 @@ tools_software_languages_filter: - Assembly: 4 - assembly: 1 - AWS CodeBuild: 1 -- AWS EC2: 1 +- AWS EC2: 2 - AWS Elastic Container Service (ECS): 1 - AWS Elastic Kubernetes Service (EKS): 2 +- Bash: 1 - Bastion: 3 - BOLT: 1 - bpftool: 1 @@ -69,7 +70,7 @@ tools_software_languages_filter: - Flink: 1 - Fortran: 1 - FVP: 3 -- GCC: 18 +- GCC: 19 - gdb: 1 - Geekbench: 1 - GenAI: 5 @@ -83,7 +84,7 @@ tools_software_languages_filter: - InnoDB: 1 - Intrinsics: 1 - JAVA: 1 -- Java: 1 +- Java: 2 - JAX: 1 - Kafka: 1 - Keras: 1 @@ -105,9 +106,9 @@ tools_software_languages_filter: - Nginx: 3 - Node.js: 3 - PAPI: 1 -- perf: 3 +- perf: 4 - PostgreSQL: 4 -- Python: 12 +- Python: 13 - PyTorch: 5 - RAG: 1 - Redis: 3 @@ -116,6 +117,7 @@ tools_software_languages_filter: - Rust: 2 - snappy: 1 - Snort: 1 +- Snort3: 1 - SQL: 7 - Streamline CLI: 1 - Supervisor: 1 @@ -130,6 +132,7 @@ tools_software_languages_filter: - TypeScript: 1 - Vectorscan: 1 - Visual Studio Code: 3 +- WindowsPerf: 1 - WordPress: 3 - x265: 1 - zlib: 1 diff --git 
a/content/learning-paths/servers-and-cloud-computing/java-gc-tuning/Example_application.md b/content/learning-paths/servers-and-cloud-computing/java-gc-tuning/Example_application.md index 9b0d4664e..abb01708e 100644 --- a/content/learning-paths/servers-and-cloud-computing/java-gc-tuning/Example_application.md +++ b/content/learning-paths/servers-and-cloud-computing/java-gc-tuning/Example_application.md @@ -1,14 +1,16 @@ --- title: Example Application -weight: 4 +weight: 5 ### FIXED, DO NOT MODIFY layout: learningpathall --- -## Example Application. +## Example Application -Using a file editor of your choice, copy the Java snippet below into a file named `HeapUsageExample.java`. This code example allocates 1 million string objects to fill up the heap. You can use this example to easily observe the effects of different GC tuning parameters. +Using a file editor of your choice, copy the Java snippet below into a file named `HeapUsageExample.java`. + +This code example allocates 1 million string objects to fill up the heap. You can use this example to easily observe the effects of different GC tuning parameters. ```java public class HeapUsageExample { @@ -32,9 +34,13 @@ public class HeapUsageExample { } ``` -### Enable GC logging +### Enable Garbage Collector logging + +To observe what the Garbage Collector is doing, one option is to enable logging while the JVM is running. + +To enable this, you need to pass in some command-line arguments. The `gc` option logs the GC information. The `filecount` option creates a rolling log to prevent uncontrolled growth of logs, with the drawback that historical logs might be rewritten and lost. -To observe what the GC is doing, one option is to enabling logging while the JVM is running. To enable this, you need to pass in some command-line arguments. The `gc` option logs the GC information. The `filecount` option creates a rolling log to prevent uncontrolled growth of logs with the drawback that historical logs may be rewritten and lost. Run the following command to enable logging with JDK 11 and higher: ```bash java -Xms512m -Xmx1024m -XX:+UseSerialGC -Xlog:gc:file=gc.log:tags,uptime,time,level:filecount=10,filesize=16m HeapUsageExample.java ``` If you are using JDK8, use the following command instead: ```bash java -Xms512m -Xmx1024m -XX:+UseSerialGC -Xloggc:gc.log -XX:+PrintGCTimeStamps -XX:+UseGCLogFileRotation HeapUsageExample.java ``` -The `-Xms512m` and `-Xmx1024` options create a minimum and maximum heap size of 512 MiB and 1GiB respectively. This is simply to avoid waiting too long to see activity within the GC. Additionally, you will force the JVM to use the serial garbage collector with the `-XX:+UseSerialGC` flag. +The `-Xms512m` and `-Xmx1024m` options create a minimum and maximum heap size of 512 MiB and 1 GiB respectively. This is to avoid waiting too long to see activity within the GC. Additionally, you can force the JVM to use the serial garbage collector with the `-XX:+UseSerialGC` flag. You will now see a log file, named `gc.log` created within the same directory. Open `gc.log` and the contents should look similar to: [2024-11-08T15:04:54.350+0000][0.759s][info][gc] GC(3) Pause Young (Allocation Failure) 139M->3M(494M) 3.699ms -These logs provide insights into the frequency, duration, and impact of Young garbage collection events. The results may vary depending on your system.
+These logs provide insights into the frequency, duration, and impact of Young garbage collection events. The results can vary depending on your system. - Frequency: ~ every 46 ms - Pause duration: ~ 3.6 ms - Reduction size: ~ 136 MB (heap usage drops from 139 MB to 3 MB) -This logging method can be quite verbose. Also, this method isn't suitable for a running process which makes debugging a live running application slightly more challenging. +This logging method can be quite verbose, and makes it challenging to debug a live running application. ### Use jstat to observe real-time GC statistics -Using a file editor of your choice, copy the java code below into a file named `WhileLoopExample.java`. This java code snippet is a long-running example that prints out a random integer and double precision floating point number 4 times a second. +Using a file editor of your choice, copy the Java code below into a file named `WhileLoopExample.java`. + +This Java code snippet is a long-running example that prints out a random integer and double-precision floating point number four times a second: ```java import java.util.Random; public class GenerateRandom { public static void main(String args[]) { // create random object Random rand_obj = new Random(); while (true) { // Generate random integers in range 0 to 999 int rand_int1 = rand_obj.nextInt(1000); // Print random integers System.out.println("Random Integers: " + rand_int1); // Generate Random doubles double rand_dub1 = rand_obj.nextDouble(); // Print random double System.out.println("Random Doubles: " + rand_dub1); - // Sleep for 1 second (1000 milliseconds) + // Sleep for 1/4 second (250 milliseconds) try { Thread.sleep(250); } catch (InterruptedException e) { @@ -107,13 +115,15 @@ Start the Java program with the command below. This will use the default paramet ```bash java WhileLoopExample.java ``` -While the program running, open another terminal session. In the new terminal use the `jstat` command to print out the JVM statistics specifically related to the GC using the `-gcutil` flag: +While the program is running, open another terminal session. + +In the new terminal, use the `jstat` command to print out the JVM statistics specifically related to the GC using the `-gcutil` flag: ```bash jstat -gcutil $(pgrep java) 1000 ``` -You will observe output like the following until `ctl+c` is pressed. +You will observe output like the following until `Ctrl+C` is pressed: ```output S0 S1 E O M CCS YGC YGCT FGC FGCT CGC CGCT GCT ``` The columns of interest are: -- **E (Eden Space Utilization)**: The percentage of the Eden space that is currently used. High utilization indicates frequent allocations and can trigger minor GCs. -- **O (Old Generation Utilization)**: The percentage of the Old (Tenured) generation that is currently used. High utilization can lead to Full GCs, which are more expensive. +- **E (Eden Space Utilization)**: The percentage of the Eden space that is being used. High utilization indicates frequent allocations and can trigger minor GCs. +- **O (Old Generation Utilization)**: The percentage of the Old (Tenured) generation that is being used. High utilization can lead to Full GCs, which are more expensive.
+- **YGCT (Young Generation GC Time)**: The total time in seconds spent in Young Generation (minor) GC events. High values indicate frequent minor GCs, which can impact performance. +- **FGCT (Full GC Time)**: The total time in seconds spent in Full GC events. High values indicate frequent Full GCs, which can significantly impact performance. +- **GCT (Total GC Time)**: The total time in seconds spent in all GC events (Young, Full, and Concurrent). This provides an overall view of the time spent in GC, helping to assess the impact on application performance. diff --git a/content/learning-paths/servers-and-cloud-computing/java-gc-tuning/Tuning Parameters.md b/content/learning-paths/servers-and-cloud-computing/java-gc-tuning/Tuning Parameters.md index 0de271795..f6900c185 100644 --- a/content/learning-paths/servers-and-cloud-computing/java-gc-tuning/Tuning Parameters.md +++ b/content/learning-paths/servers-and-cloud-computing/java-gc-tuning/Tuning Parameters.md @@ -1,6 +1,6 @@ --- title: Basic GC Tuning Options -weight: 5 +weight: 6 ### FIXED, DO NOT MODIFY layout: learningpathall @@ -8,7 +8,9 @@ layout: learningpathall ### Update the JDK version -If you are on an older version of JDK, a sensible first step is to use one of the latest long-term-support (LTS) releases of JDK. This is because the GC versions included with recent JDKs offer improvements. For example, the G1GC included with JDK 11 offers improvements in the pause time compared to JDK 8. As shown earlier, you can use the `java --version` command to check the version currently in use. +If you are on an older version of JDK, a sensible first step is to use one of the latest long-term-support (LTS) releases of JDK. This is because the GC versions included with recent JDKs offer improvements on previous releases. For example, the G1GC included with JDK 11 offers improvements in the pause time compared to JDK 8. + +As shown earlier, you can use the `java --version` command to check the version currently in use: ```output $ java --version @@ -22,25 +24,27 @@ OpenJDK 64-Bit Server VM Corretto-21.0.4.7.1 (build 21.0.4+7-LTS, mixed mode, sh In this section, you will use the `HeapUsageExample.java` file you created earlier. -The G1 GC (Garbage-First Garbage Collector) is designed to handle large heaps and aims to provide low pause times by dividing the heap into regions and performing incremental garbage collection. This makes it suitable for applications with high allocation rates and large memory footprints. +The Garbage-First Garbage Collector (G1GC) is designed to handle large heaps and aims to provide low pause times by dividing the heap into regions and performing incremental garbage collection. This makes it suitable for applications with high allocation rates and large memory footprints. + +You can run the following command to generate the GC logs using a different GC and compare the two. -You can run the following command to generate the GC logs using a different GC and compare. You just need to change the GC from `Serial` to `G1GC` using the `-XX:+UseG1GC` option as shown: +To make this comparison, change the Garbage Collector from `Serial` to `G1GC` using the `-XX:+UseG1GC` option: ```bash java -Xms512m -Xmx1024m -XX:+UseG1GC -Xlog:gc:file=gc.log:tags,uptime,time,level:filecount=10,filesize=16m HeapUsageExample.java ``` -From the created log file `gc.log`, you can observe that at a very similar time after start up (~0.75s), the Pause Young time reduced from ~3.6ms to ~1.9ms. 
Further, the time between GC pauses has improved from ~46ms to every ~98ms. +From the created log file `gc.log`, you can see that at a similar time after startup (~0.75s), the Pause Young time reduced from ~3.6ms to ~1.9ms. Further, the interval between GC pauses has increased from ~46ms to ~98ms. ```output [2024-11-08T16:13:53.088+0000][0.790s][info][gc ] GC(2) Pause Young (Normal) (G1 Evacuation Pause) 307M->3M(514M) 1.976ms ... [2024-11-08T16:13:53.186+0000][0.888s][info][gc ] GC(3) Pause Young (Normal) (G1 Evacuation Pause) 307M->3M(514M) 1.703ms ``` -As discussed in the previous section, the performance improvement from moving to a G1GC will depend on the CPU overhead of your system. The performance may vary depending on the cloud instance size and available CPU resources. +As described in the previous section, the performance improvement from moving to a G1GC depends on the CPU overhead of your system. The performance can vary depending on the cloud instance size and available CPU resources. -### Add GC Targets +### Add Garbage Collector Targets -You can manually provide targets for specific metrics and the GC will attempt to meet those requirements. For example, if you have a time-sensitive application such as a REST server, you may want to ensure that all customers receive a response within a specific time. You may find that if a client request is sent during GC you need to ensure that the GC pause time is minimised. +You can manually provide targets for specific metrics and the GC will attempt to meet those requirements. For example, if you have a time-sensitive application such as a REST server, you might want to ensure that all customers receive a response within a specific time. You might find that, if a client request is sent during Garbage Collection, you need to ensure that the GC pause time is minimized. Running the command with the `-XX:MaxGCPauseMillis=` sets a target max GC pause time: @@ -55,19 +59,19 @@ Looking at the output below, you can see that at the same initial state after ~0 [2024-11-08T16:27:37.149+0000][0.853s][info][gc] GC(19) Pause Young (Normal) (G1 Evacuation Pause) 193M->3M(514M) 0.482ms ``` -Here are some additional target options you can consider to tune performance: +Here are some additional target options that you can consider to tune performance: - -XX:InitiatingHeapOccupancyPercent: -Defines the old generation occupancy threshold to trigger a concurrent GC cycle. Adjusting this can be beneficial if your application experiences long GC pauses due to high old generation occupancy. For example, lowering this threshold can help start GC cycles earlier, reducing the likelihood of long pauses during peak memory usage. +This defines the old generation occupancy threshold to trigger a concurrent GC cycle. Adjusting this is beneficial if your application experiences long GC pauses due to high old generation occupancy. For example, lowering this threshold can help start GC cycles earlier, reducing the likelihood of long pauses during peak memory usage. - -XX:ParallelGCThreads -Specifies the number of threads for parallel GC operations. +This specifies the number of threads for parallel GC operations.
Increasing this value is beneficial for applications running on multi-core processors, as it allows GC tasks to be processed faster. For instance, a high-throughput server application might benefit from more parallel GC threads to minimize pause times and improve overall performance. - -XX:G1HeapRegionSize -Determines the size of G1 regions, which must be a power of 2 between 1 MB and 32 MB. Adjusting this can be useful for applications with specific memory usage patterns. For example, setting a larger region size can reduce the number of regions and associated overhead for applications with large heaps, while smaller regions might be better for applications with more granular memory allocation patterns. +This determines the size of G1 regions, which must be a power of 2 between 1 MB and 32 MB. Adjusting this can be useful for applications with specific memory usage patterns. For example, setting a larger region size can reduce the number of regions and associated overhead for applications with large heaps, while smaller regions might be better for applications with more granular memory allocation patterns. -You can refer to [this technical article](https://www.oracle.com/technical-resources/articles/java/g1gc.html) for more information of G1GC tuning. +See [Garbage First Garbage Collector Tuning](https://www.oracle.com/technical-resources/articles/java/g1gc.html) for more information about G1GC tuning. diff --git a/content/learning-paths/servers-and-cloud-computing/java-gc-tuning/_index.md b/content/learning-paths/servers-and-cloud-computing/java-gc-tuning/_index.md index 98111756b..59701ecba 100644 --- a/content/learning-paths/servers-and-cloud-computing/java-gc-tuning/_index.md +++ b/content/learning-paths/servers-and-cloud-computing/java-gc-tuning/_index.md @@ -3,16 +3,17 @@ title: Tune the Performance of the Java Garbage Collector minutes_to_complete: 45 -who_is_this_for: This learning path is designed for Java developers aiming to optimize application performance on Arm-based servers. It is especially valuable for those migrating applications from x86-based to Arm-based instances. +who_is_this_for: This Learning Path is for Java developers aiming to optimize application performance on Arm-based servers, especially those migrating applications from x86-based to Arm-based instances. learning_objectives: - - Understand the key differences among Java garbage collectors (GCs). - - Monitor and interpret GC performance metrics. + - Describe the key differences between individual Java Garbage Collectors (GCs). - Monitor and interpret Garbage Collector performance metrics. - Adjust core parameters to optimize performance for your specific workload. prerequisites: - - An Arm based instance from a cloud service provider, or an on-premise Arm server. - - Basic understanding of Java and [Java installed](/install-guides/java/) on your machine. + - An Arm-based instance from a cloud service provider, or an on-premise Arm server. + - Basic understanding of Java. + - An [installation of Java](/install-guides/java/) on your machine.
author_primary: Kieran Hejmadi diff --git a/content/learning-paths/servers-and-cloud-computing/java-gc-tuning/_review.md b/content/learning-paths/servers-and-cloud-computing/java-gc-tuning/_review.md index 2a8fd709a..c6293b7e5 100644 --- a/content/learning-paths/servers-and-cloud-computing/java-gc-tuning/_review.md +++ b/content/learning-paths/servers-and-cloud-computing/java-gc-tuning/_review.md @@ -2,30 +2,30 @@ review: - questions: question: > - What is the purpose of garbage collection? + What is the purpose of Garbage Collection? answers: - - To manage memory by automatically reclaiming unused objects - - To manually manage memory allocation + - To manage memory by automatically reclaiming unused objects. + - To manually manage memory allocation. correct_answer: 1 explanation: > - Garbage collection is used to manage memory by automatically reclaiming memory occupied by objects that are no longer in use, thus preventing memory leaks and optimizing memory usage. + Garbage Collection is used to manage memory by automatically reclaiming memory occupied by objects that are no longer in use, to prevent memory leaks and optimize memory usage. - questions: question: > - Which JVM flag can be used to enable detailed garbage collection logging? + Which JVM flag can you use to enable detailed garbage collection logging? answers: - - -XX:+UseG1GC - - -XX:+PrintGCDetails + - -XX:+UseG1GC. + - -XX:+PrintGCDetails. correct_answer: 2 explanation: > The flag -XX:+PrintGCDetails enables detailed logging of garbage collection events, which helps in monitoring and tuning the GC performance. - questions: question: > - Which garbage collector is best suited for applications requiring very low latency in a heavily multi-threaded application? + Which Garbage Collector is best suited for applications requiring very low latency in a heavily multi-threaded application? answers: - - Serial GC - - ZGC + - Serial GC. + - ZGC. correct_answer: 2 explanation: > ZGC (Z Garbage Collector) is designed for applications requiring very low latency, as it aims to keep pause times below 10 milliseconds even for large heaps. diff --git a/content/learning-paths/servers-and-cloud-computing/java-gc-tuning/different_gcs.md b/content/learning-paths/servers-and-cloud-computing/java-gc-tuning/different_gcs.md index dc26e4c2f..39dab8656 100644 --- a/content/learning-paths/servers-and-cloud-computing/java-gc-tuning/different_gcs.md +++ b/content/learning-paths/servers-and-cloud-computing/java-gc-tuning/different_gcs.md @@ -1,46 +1,48 @@ --- -title: Types of GCs -weight: 3 +title: Types of Garbage Collector +weight: 4 ### FIXED, DO NOT MODIFY layout: learningpathall --- -In this section, you will explore the key differences among commonly used production GCs. You will learn about the advantages and disadvantages of each GC, along with guidance on selecting the best one for your Java application. +In this section, you will explore the key differences among commonly used production GCs. You will learn about the advantages and disadvantages of each GC, along with guidance on selecting the best one for your Java application. ### Serial Garbage Collector -The Serial Garbage Collector (Serial GC) is a simple, single-threaded garbage collector, primarily designed for small applications or single-processor environments. Java’s heap is divided into two main regions, or generations—the young generation and the old generation—to help manage short-lived and long-lived objects differently. In the Serial GC, both the young and old generations are collected using a single-threaded, “stop-the-world” approach, where all application threads pause during garbage collection. This design can lead to noticeable application pauses, particularly as the heap size grows, making the Serial GC unsuitable for larger, latency-sensitive applications. +The Serial Garbage Collector (Serial GC) is a simple, single-threaded garbage collector, primarily designed for small applications or single-processor environments. As described earlier, Java’s heap is divided into two main generations: the young generation, which manages short-lived objects, and the old generation, which manages long-lived objects. -In production deployments, the Serial GC is rarely used in high-throughput or multi-threaded applications because it does not utilize the parallel processing capabilities of modern CPUs, resulting in longer pause times compared to other collectors. Its limitations make it inefficient for large-scale applications, where even brief pauses can disrupt user experience. However, for applications with limited memory and CPU resources or those needing a predictable, single-threaded execution model, the Serial GC remains a straightforward and low-overhead option. +In the Serial Garbage Collector, both the young and old generations are collected using a single-threaded, “stop-the-world” approach, where all application threads pause during garbage collection. This design can lead to noticeable application pauses, particularly as the heap size grows, making the Serial GC unsuitable for larger, latency-sensitive applications. + +In production deployments, the Serial GC is rarely used in high-throughput or multi-threaded applications as it does not utilize the parallel processing capabilities of modern CPUs, and so has longer pause times compared to other collectors. These limitations make it inefficient for large-scale applications, where even brief pauses can disrupt user experience. However, for applications with limited memory and CPU resources, or those needing a predictable, single-threaded execution model, the Serial GC remains a straightforward and low-overhead option. ### Throughput Garbage Collector -The Parallel GC, also called the Throughput GC, uses the same generational heap structure as the Serial GC, dividing memory into young and old generations to manage short-lived and long-lived objects. Unlike the Serial GC, the Parallel GC uses multiple threads for garbage collection, improving efficiency on larger heaps. When the young generation fills up, a young collection pause occurs, briefly pausing application threads to clear the young space. As shown in the diagram below, data in the young generation space is mostly freed, with surviving objects moved to the old generation. +The Parallel Garbage Collector, also called the Throughput Garbage Collector, uses the same generational heap structure as the Serial Garbage Collector, dividing memory into young and old generations to manage short-lived and long-lived objects. Unlike the Serial GC, however, the Parallel GC uses multiple threads for Garbage Collection, which improves efficiency on larger heaps. When the young generation fills up, a young collection pause occurs, briefly pausing application threads to clear the young space. As shown in Figure 1, data in the young generation space is mostly freed, with surviving objects moved to the old generation.
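+You can try the Parallel GC for yourself by selecting it explicitly on the command line. The sketch below is illustrative only: `-XX:+UseParallelGC` is a standard HotSpot flag, but the application name `MyApp.java` is a placeholder for your own program: + +```bash +java -XX:+UseParallelGC MyApp.java +```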
-Once the old generation is full, a full GC pause blocks all application threads for a longer duration to clean both generations. These full GC pauses can degrade performance in latency-sensitive applications, such as database management systems, where interruptions affect responsiveness. The Parallel GC’s multi-threaded approach helps reduce pause times, making it better suited for applications that prioritize throughput and can handle occasional longer pauses for full collection. +Once the old generation is full, a full GC pause blocks all application threads for a longer duration to clean both generations. These full GC pauses can degrade performance in latency-sensitive applications, such as database management systems, where interruptions affect responsiveness. The Parallel GC’s multi-threaded approach helps reduce pause times, making it better suited to applications that prioritize throughput and can handle occasional longer pauses for full collection. -![throughput_minor_gc](./throughput_gc.jpg) +![throughput_minor_gc alt-text#center](./throughput_gc.jpg "Figure 1: Throughput Garbage Collector") ### Garbage First Garbage Collector (G1GC) -The G1GC is the default garbage collector starting from JDK version 11. G1 Garbage Collector (GC) works by dividing the heap into discrete regions, typically around 2,048 by default. These regions can be part of either the old or new generation and do not need to be contiguous. The purpose of having regions in the old generation is to allow concurrent background threads to identify and target regions with a higher concentration of unreferenced objects. The tradeoff of using concurrent threads is at the expense of slightly higher CPU utilisation. G1GC is most effective when there is at least ~20% unutilised CPU headroom. +From JDK version 11, G1GC is the default Garbage Collector. It works by dividing the heap into discrete regions, typically around 2,048 by default. These regions can be part of either the old or new generation and do not need to be contiguous. The purpose of having regions in the old generation is to allow concurrent background threads to identify and target regions with a higher concentration of unreferenced objects. The trade-off of using concurrent threads is slightly higher CPU utilization. G1GC is most effective when there is at least 20% unutilized CPU headroom. -Although collecting a region still necessitates pausing application threads, G1GC can prioritize regions with the most garbage, thereby minimizing the time spent on garbage collection. The result is that the pause times for full GC pauses is less compared to the throughput collector. The diagram below illustrates how the G1GC is divided into discrete chunks and how memory is freed. +Although collecting a region still necessitates pausing application threads, G1GC can prioritize regions with the most garbage, thereby minimizing the time spent on garbage collection. The result is that full GC pause times are shorter than with the throughput collector. Figure 2 illustrates how the G1GC is divided into discrete chunks and how memory is freed. -![g1gc](./g1gc.jpg) +![g1gc alt-text#center](./g1gc.jpg "Figure 2: Garbage First Garbage Collector") ### ZGC and Shenandoah Garbage Collectors -Heap compaction time in Java garbage collection refers to the process of reorganizing live objects in memory to eliminate fragmentation. In the G1GC, heap compaction time is largely determined by the time spent relocating objects within memory, which requires pausing all application threads during the process. In contrast, the ZGC and Shenandoah GC can perform heap compaction concurrently while the application continues running, reducing pause times. ZGC and Shenandoah GCs use a form of locking to implement concurrent heap compaction in a light-weight manner. Starting from JDK version 15, ZGC became production ready. +Heap compaction time in Java Garbage Collection refers to the process of reorganizing live objects in memory to eliminate fragmentation. In the G1GC, heap compaction time is determined by the time spent relocating objects within memory, which requires pausing all application threads during the process. In contrast, the ZGC and Shenandoah Garbage Collectors can perform heap compaction concurrently while the application continues running, reducing pause times. ZGC and Shenandoah GCs use a form of locking to implement concurrent heap compaction in a lightweight manner. Starting from JDK version 15, ZGC became production-ready. -The ZGC and Shenandoah GC are particularly suited for applications that require ultra-low pause times and can benefit from concurrent garbage collection, making them ideal for large-scale, latency-sensitive applications like real-time analytics, trading systems, and other interactive services. By allowing heap compaction to occur concurrently, these collectors significantly reduce application pauses compared to G1GC, which pauses all threads during compaction. +The ZGC and Shenandoah Garbage Collectors are particularly suited for applications that require ultra-low pause times and can benefit from concurrent garbage collection, making them ideal for large-scale, latency-sensitive applications such as real-time analytics, trading systems, and other interactive services. By allowing heap compaction to occur concurrently, these collectors significantly reduce application pauses compared to G1GC, which pauses all threads during compaction. -However, the tradeoff with these collectors is a higher CPU overhead, as concurrent garbage collection requires additional processing while the application is running. +However, the trade-off with these collectors is a higher CPU overhead, as concurrent garbage collection requires additional processing while the application is running. ### Comparison Table -The following table can be used as a rough guide for your specific java application. +You can use the following table as an approximate guide for your specific Java application. diff --git a/content/learning-paths/servers-and-cloud-computing/java-gc-tuning/optional_tuning.md b/content/learning-paths/servers-and-cloud-computing/java-gc-tuning/optional_tuning.md index f5acbdc5e..f6a8e6aea 100644 --- a/content/learning-paths/servers-and-cloud-computing/java-gc-tuning/optional_tuning.md +++ b/content/learning-paths/servers-and-cloud-computing/java-gc-tuning/optional_tuning.md @@ -1,6 +1,6 @@ --- title: Intermediate GC Tuning Options -weight: 6 +weight: 7 ### FIXED, DO NOT MODIFY layout: learningpathall @@ -8,25 +8,29 @@ layout: learningpathall ## Optional Tuning Parameters -If you have an intermediate understanding of Java performance, you can experiment with the additional tuning options in this section to see how it impacts your applications performance. This is a non-exhaustive list. Please see the 'Next Steps' tab for further reading.
+If you have an intermediate understanding of Java performance, you can experiment with the additional tuning options in this section to see how they impact the performance of your application. This is a non-exhaustive list. See the *Next Steps* section for further reading. ### Which adaptive heap sizing strategy is being used? -The JVM attempts to find an optimal sizing solution within the bounds of the policies and parameters through adaptive sizing, varying the generation and heap sizes dynamically during execution. This is on the assumption that historic GC cycles will be similar to future GC cycles. This is generally true. +The JVM attempts to find an optimal sizing solution within the bounds of the policies and parameters through adaptive sizing, varying the generation and heap sizes dynamically during execution. This assumes that historic GC cycles are similar to future GC cycles. This is generally true. -However, in specific cases where you have existing knowledge of the heap requirements, for example a small, short-lived java utility, disabling adaptive sizing using the flag shown below can avoid the small overhead and time taken to resize. Note the `-` before the `UseAdaptiveSizePolicy` disables this feature. +However, in specific cases where you have existing knowledge of the heap requirements, for example with a small, short-lived Java utility, disabling adaptive sizing using the flag shown below can avoid the small overhead and time taken to resize. + +{{% notice Note %}} +The `-` before the `UseAdaptiveSizePolicy` disables this feature. +{{% /notice %}} ```bash -XX:-UseAdaptiveSizePolicy ``` -In JDK8, to observe how the JVM is resizing an application, set the `-XX:+PrintAdaptiveSizePolicy` to print the information on generation resizing in the GC log. +In JDK8, to observe how the JVM resizes an application, set the `-XX:+PrintAdaptiveSizePolicy` flag to print the information on generation resizing in the GC log. ### Is your GC NUMA aware? -Non-uniform memory architecture(NUMA) occurs when the memory performance varies depending on which core the application is running on and where the data is in memory. This is a common occurence if you are using a system with multiple sockets. If your system has multiple sockets you need to ensure the GC is aware of this to optimise memory access patterns. The `numactl` command line tool can be used to check if your system is of non-uniform memory architecture. +Non-Uniform Memory Architecture (NUMA) occurs when the memory performance varies depending on which core the application is running on and where the data is located in memory. This is a common occurrence if you are using a system with multiple sockets, in which case you need to ensure that the GC is aware of this to optimize memory access patterns. You can use the `numactl` command-line tool to check whether your system has a Non-Uniform Memory Architecture. -You can install `numactl` with your distribution's package manager, For example on Ubuntu, you can run `sudo apt-get install numactl`. +You can install `numactl` with your distribution's package manager. For example, on Ubuntu, you can run `sudo apt-get install numactl`. The command line option below can be used to enable NUMA-aware GC: @@ -35,23 +39,25 @@ ``` -### Is the GC Heap Size Appropriate? +### Is the Garbage Collection Heap Size Appropriate? + +If the size of the heap is too small, excessive time is spent in GC compared to the application logic. However, disproportionately large heaps result in longer GC pauses as there is more memory to scan. You can use the `-Xmx ` and `-Xms ` options to specify the maximum and minimum memory sizes respectively. If you know the heap size required based on data, setting the minimum and maximum values slightly improves the performance since resizing never takes place. -If the size of the heap is too small, excessive time will be spent in GC compared to the application logic. However disproportionately large heaps will result in longer GC pauses as there is more memory to parse. The `-Xmx ` and `-Xms ` options can be used to specify the maximum and minimum memory sizes respectively. If you know the heap size required based on data, setting the minimum and maximum values will slightly improve the performance since resizing will never take place. +It is recommended that the max heap size is not greater than the physical memory on your system. If multiple JVMs are running, the sum of their heaps must not exceed the total physical memory (the `free -h` command can be used to find the physical memory). This is to avoid the high latency cost of accessing memory on disk due to swapping during a full GC sweep. -It is recommended the max heap size is not greater that the physical memory on your system. If multiple JVMs are running the sum of their heaps must not exceed the total physical memory (the `free -h` command can be used to find the phyisical memory). This is to avoid the high latency accesses to access memory on disk from swapping during a full GC sweep. -Unfortunately there is no hard rule on which values to set. However a rule of thumb is to aim for 30% occupancy of the heap after a full GC. This requires running the application until a steady state has been reached. +Unfortunately, there is no hard rule on which values to set. However, a useful benchmark to apply is to aim for 30% occupancy of the heap after a full GC. This requires running the application until a steady state has been reached. -### Are the GC generation sizes appropriate? +### Are the Garbage Collection generation sizes appropriate? -Going a step further, garbage collectors (GCs) divide the heap into generations: young, survivor, and old. The young generation holds short-lived data, while the old generation holds long-lived data. This separation allows GCs to process the young generation more quickly, reducing pause times. It is recommended to hand tune the generation sizes if you are an advanced java user. As an example use case, in a Java application where startup performance is critical, tuning the young generation size can help. By increasing the young generation size, you can reduce the frequency of minor GCs during startup, leading to faster application initialization. +Going a step further, garbage collectors (GCs) divide the heap into generations: young, survivor, and old. The young generation holds short-lived data, while the old generation holds long-lived data. This separation allows GCs to process the young generation more quickly, reducing pause times. It is recommended to hand-tune the generation sizes if you are an advanced Java user. +As an example use case, in a Java application where startup performance is critical, tuning the young generation size can help. By increasing the young generation size, you can reduce the frequency of minor GCs during startup, leading to faster application initialization.
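+As an illustrative sketch (the sizes and the `MyApp` class name are placeholders, not recommendations), such a startup-focused configuration might pin the heap and enlarge the young generation using the flags described below: + +```bash +java -Xms1g -Xmx1g -XX:NewSize=512m -XX:MaxNewSize=512m MyApp +```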
-Use the following command-line flag adjust the ratio of young to old generations from the default value of 2 for all GC algorithms: +Use the following command-line flag to adjust the ratio of young to old generations from the default value of 2 for all GC algorithms: ```bash -XX:NewRatio= ``` -Additionally, the initial size and maximum size of the young generation can be modified with `-XX:NewSize` and `-XX:MaxNewSize` respectively. For more information, you can refer to the [factors affecting garbage collection performance](https://docs.oracle.com/en/java/javase/11/gctuning/factors-affecting-garbage-collection-performance.html#GUID-4ADBEDE9-5D52-4FBF-ADB2-431C3EB089C5) +Additionally, the initial size and maximum size of the young generation can be modified with `-XX:NewSize` and `-XX:MaxNewSize` respectively. For more information, see [Factors affecting Garbage Collection Performance](https://docs.oracle.com/en/java/javase/11/gctuning/factors-affecting-garbage-collection-performance.html#GUID-4ADBEDE9-5D52-4FBF-ADB2-431C3EB089C5). diff --git a/content/learning-paths/servers-and-cloud-computing/java-gc-tuning/purpose_of_gc.md b/content/learning-paths/servers-and-cloud-computing/java-gc-tuning/purpose_of_gc.md index ef8bb2e12..0da03bd03 100644 --- a/content/learning-paths/servers-and-cloud-computing/java-gc-tuning/purpose_of_gc.md +++ b/content/learning-paths/servers-and-cloud-computing/java-gc-tuning/purpose_of_gc.md @@ -1,60 +1,34 @@ --- -title: Purpose of GC +title: Overview weight: 2 ### FIXED, DO NOT MODIFY layout: learningpathall --- -### The Purpose of GC +### Automatic Memory Management -Garbage collection (GC) is the term used for automatic memory management primarily within managed langauages such as Java. This means developers do not need to explicitly free variables once they're no longer required, such as in the C programming language. Java Virtual Machine distributions typically come with several GCs. The disadvantage is that Java has less control of memory growth which can cause knock-on effects such as page faults. Further, the automatic process of finding variables whose memory can be freed occurs CPU overhead that may run intermittently such as in the GC mark-swap algorithm. The execution of your Java application may pause during this time, controlling the length and frequency of these pauses can greatly improve performance. +Garbage Collection (GC) is the term used in programming to describe the concept and process of automatic memory management, primarily deployed within managed languages such as Java. -Broadly speaking, the GC has to do 3 main tasks; find the objects to free, free the memory and compact the heap. Most GCs further separate the heap into generations. -- **The young generation** holds data that is used for a short period. -- **The old generation** holds longer-lived data. +In a programming language such as C, developers need to explicitly free variables once they are no longer required. Automatic memory management removes the requirement for this procedure, meaning that there is less potential for human error. -This takes advantage of the fact that most data is short lived so it's faster to process just the young generation during GC, resulting in shorted pause times. A full GC refers to going through the entire heap, leading to the so called 'stop-the-world pauses' that impact your applications performance. +The Garbage Collector must perform three main tasks: -### Check the JDK version +* Find the objects to free. +* Free the memory. 
+* Compact the heap. + +Java Virtual Machine distributions typically come with several Garbage Collectors. A disadvantage of automatic memory management is that Java has less control of memory growth, which can subsequently cause knock-on effects such as page faults. In addition, the automatic process of finding variables whose memory can be freed creates CPU overhead, for example while the GC runs its mark-sweep algorithm. The execution of a Java application might pause during this process, and so being able to control the length and frequency of these pauses is key to optimizing performance. + +### Garbage Collection Generations + +Most Garbage Collectors separate the heap into generations: + +* The young generation holds data that is used for a short period. +* The old generation holds longer-lived data. + +This results in shorter pause times, as most data is short-lived and the young generation is faster to process. + +A full Garbage Collection means going through the entire heap, leading to 'stop-the-world' pauses that impact the performance of an application. -### Check the JDK version -Different versions of the Java Development Kit (JDK) ship with different GCs. First, check the version of Java installed on your system by running the following command: -```bash -java --version -``` -The output should look similar to: -```output -openjdk 21.0.4 2024-07-16 LTS -OpenJDK Runtime Environment Corretto-21.0.4.7.1 (build 21.0.4+7-LTS) -OpenJDK 64-Bit Server VM Corretto-21.0.4.7.1 (build 21.0.4+7-LTS, mixed mode, sharing) -``` -If the `java` command is not recognised, please follow the [Arm Java install guide](/install-guides/java/) to install Java on your system. -### Check which GCs are available -Next, you want to understand which standard GCs are available to use. Run the following command to print the available GCs: -```bash -java -XX:+PrintFlagsFinal -version | egrep 'Use\w+GC' -``` -The output below shows that 5 GCs are available to use. The middle column shows the default value. Here you can see that the `G1GC` GC is enabled. -```output - bool UseAdaptiveSizeDecayMajorGCCost = true {product} {default} - bool UseAdaptiveSizePolicyWithSystemGC = false {product} {default} - bool UseDynamicNumberOfGCThreads = true {product} {default} - bool UseG1GC = true {product} {ergonomic} - bool UseMaximumCompactionOnSystemGC = true {product} {default} - bool UseParallelGC = false {product} {default} - bool UseSerialGC = false {product} {default} - bool UseShenandoahGC = false {product} {default} - bool UseZGC = false {product} {default} - -``` -In the next section you will learn about the different types of GCs. diff --git a/content/learning-paths/servers-and-cloud-computing/java-gc-tuning/setup.md b/content/learning-paths/servers-and-cloud-computing/java-gc-tuning/setup.md new file mode 100644 index 000000000..982693cde --- /dev/null +++ b/content/learning-paths/servers-and-cloud-computing/java-gc-tuning/setup.md @@ -0,0 +1,51 @@ +--- +title: Setup +weight: 3 + +### FIXED, DO NOT MODIFY +layout: learningpathall +--- +### Check the JDK version + +Different versions of the Java Development Kit (JDK) ship with different Garbage Collectors.
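+As a quick sketch, you can also ask the JVM to print the flags it selects ergonomically, which typically includes the chosen Garbage Collector; `-XX:+PrintCommandLineFlags` is a standard HotSpot option: + +```bash +java -XX:+PrintCommandLineFlags -version +```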
+ +To check the version of Java installed on your system, run the following command: + +```bash +java --version +``` + +The output should look similar to: + +```output +openjdk 21.0.4 2024-07-16 LTS +OpenJDK Runtime Environment Corretto-21.0.4.7.1 (build 21.0.4+7-LTS) +OpenJDK 64-Bit Server VM Corretto-21.0.4.7.1 (build 21.0.4+7-LTS, mixed mode, sharing) +``` + +If the `java` command is not recognized, you can follow the [Arm Java install guide](/install-guides/java/) to install Java on your system. + +### Identify available Garbage Collectors + +To find out which standard Garbage Collectors are available for you to use, run the following command: + +```bash +java -XX:+PrintFlagsFinal -version | egrep 'Use\w+GC' +``` + +The example output below shows that five GCs are available to use. The middle column shows the default value. Here you can see that `G1GC` is enabled: + +```output + bool UseAdaptiveSizeDecayMajorGCCost = true {product} {default} + bool UseAdaptiveSizePolicyWithSystemGC = false {product} {default} + bool UseDynamicNumberOfGCThreads = true {product} {default} + bool UseG1GC = true {product} {ergonomic} + bool UseMaximumCompactionOnSystemGC = true {product} {default} + bool UseParallelGC = false {product} {default} + bool UseSerialGC = false {product} {default} + bool UseShenandoahGC = false {product} {default} + bool UseZGC = false {product} {default} + +``` + +In the next section, you will learn about the different types of GCs. diff --git a/content/learning-paths/servers-and-cloud-computing/snort3-multithreading/build-and-install.md b/content/learning-paths/servers-and-cloud-computing/snort3-multithreading/build-and-install.md index eb67a093f..502f43755 100644 --- a/content/learning-paths/servers-and-cloud-computing/snort3-multithreading/build-and-install.md +++ b/content/learning-paths/servers-and-cloud-computing/snort3-multithreading/build-and-install.md @@ -88,7 +88,7 @@ installPackages() sudo apt-get update sudo apt-get install -y $LIST_OF_APPS - # required to get optimised result from Snort3 + # required to get optimized result from Snort3 downlaodPackages mkdir -p ${ROOT_DIR}/snort3 tar -xzf 3.3.5.0.tar.gz --directory ${ROOT_DIR}/snort3 --strip-components=1 diff --git a/content/learning-paths/servers-and-cloud-computing/snort3-multithreading/usecase.md b/content/learning-paths/servers-and-cloud-computing/snort3-multithreading/usecase.md index a9578446c..8d7507138 100644 --- a/content/learning-paths/servers-and-cloud-computing/snort3-multithreading/usecase.md +++ b/content/learning-paths/servers-and-cloud-computing/snort3-multithreading/usecase.md @@ -252,7 +252,7 @@ For testing, you can use `--daq dump` to analyze PCAP files. ## Spawn Snort3 process with multithreading -To run Snort3 with multithread start from the `Test` directory. +To run Snort3 with multithreading, start from the `Test` directory.
```bash cd $HOME/build/Test ``` diff --git a/content/learning-paths/smartphones-and-mobile/_index.md b/content/learning-paths/smartphones-and-mobile/_index.md index 3184b831e..ace5d6e30 100644 --- a/content/learning-paths/smartphones-and-mobile/_index.md +++ b/content/learning-paths/smartphones-and-mobile/_index.md @@ -47,6 +47,7 @@ tools_software_languages_filter: - GoogleTest: 1 - Java: 4 - Kotlin: 4 +- LiteRT: 1 - llvm-mca: 1 - MediaPipe: 1 - Memory Bug Report: 1 @@ -61,7 +62,6 @@ - Rust: 2 - SDDiskTool: 1 - SVE2: 1 -- tflite: 1 - Total Compute: 1 - Trusted Firmware: 1 - Unity: 6 diff --git a/content/learning-paths/smartphones-and-mobile/profiling-ml-on-arm/_index.md b/content/learning-paths/smartphones-and-mobile/profiling-ml-on-arm/_index.md index 09b49edbd..e31e5ab61 100644 --- a/content/learning-paths/smartphones-and-mobile/profiling-ml-on-arm/_index.md +++ b/content/learning-paths/smartphones-and-mobile/profiling-ml-on-arm/_index.md @@ -1,20 +1,21 @@ --- -title: Profile the performance of ML models on Arm - -draft: true -cascade: - draft: true +title: Profile the Performance of AI and ML Mobile Applications on Arm minutes_to_complete: 60 -who_is_this_for: This is an introductory topic for software developers who want to learn how to profile the performance of their ML models running on Arm devices. +who_is_this_for: This is an introductory topic for software developers who want to learn how to profile the performance of Machine Learning (ML) models running on Arm devices. learning_objectives: - Profile the execution times of ML models on Arm devices. - Profile ML application performance on Arm devices. + - Describe how profiling can help optimize the performance of Machine Learning applications. prerequisites: - - An Arm-powered Android smartphone, and USB cable to connect with it. + - An Arm-powered Android smartphone, and a USB cable to connect to it. + - For profiling the ML inference, [Arm NN ExecuteNetwork](https://github.com/ARM-software/armnn/releases). + - For profiling the application, [Arm Performance Studio with Streamline](https://developer.arm.com/Tools%20and%20Software/Arm%20Performance%20Studio). + - Android Studio Profiler. + + author_primary: Ben Clark @@ -28,7 +29,7 @@ armips: - Immortalis tools_software_languages: - Android Studio - - tflite + - LiteRT operatingsystems: - Android - Linux diff --git a/content/learning-paths/smartphones-and-mobile/profiling-ml-on-arm/_review.md b/content/learning-paths/smartphones-and-mobile/profiling-ml-on-arm/_review.md index 7eae5a8b1..451c2b044 100644 --- a/content/learning-paths/smartphones-and-mobile/profiling-ml-on-arm/_review.md +++ b/content/learning-paths/smartphones-and-mobile/profiling-ml-on-arm/_review.md @@ -4,35 +4,35 @@ review: - questions: question: > Streamline Profiling lets you profile: answers: - - Arm CPU activity - - Arm GPU activity - - when your Neural Network is running - - All of the above + - Arm CPU activity. + - Arm GPU activity. + - When your Neural Network is running. + - All of the above. correct_answer: 4 explanation: > - Streamline will show you CPU and GPU activity (and a lot more counters!), and if Custom Activity Maps are used, you can see when your Neural Network and other parts of your application are running. + Streamline shows you CPU and GPU activity (and a lot more counters!), and if Custom Activity Maps are used, you can see when your Neural Network and other parts of your application are running. - questions: question: > Does Android Studio have a profiler?
answers: - - "Yes" - - "No" + - "Yes." + - "No." correct_answer: 1 explanation: > - Yes, Android Studio has a built-in profiler that can be used to monitor the memory usage of your app among other things + Yes, Android Studio has a built-in profiler that can be used to monitor the memory usage of your application, amongst other functions. - questions: question: > Is there a way to profile what is happening inside your Neural Network? answers: - - Yes, Streamline just shows you out of the box - No. - - Yes, ArmNN's ExecuteNetwork can do this - - Yes, Android Studio Profiler can do this + - Yes, Streamline just shows you out of the box. + - Yes, Arm NN ExecuteNetwork can do this. + - Yes, Android Studio Profiler can do this. correct_answer: 3 explanation: > - Standard profilers don't have an easy way to see what is happening inside an ML framework to see a model running inside it. ArmNN's ExecuteNetwork can do this for TensorFlow Lite models, and ExecuTorch has tools that can do this for PyTorch models. + Standard profilers do not have an easy way to see what is happening inside an ML framework, or inside a model running within it. Arm NN ExecuteNetwork can do this for LiteRT models, and ExecuTorch has tools that can do this for PyTorch models. diff --git a/content/learning-paths/smartphones-and-mobile/profiling-ml-on-arm/app-profiling-android-studio.md b/content/learning-paths/smartphones-and-mobile/profiling-ml-on-arm/app-profiling-android-studio.md index 9f8508f3a..4c675b238 100644 --- a/content/learning-paths/smartphones-and-mobile/profiling-ml-on-arm/app-profiling-android-studio.md +++ b/content/learning-paths/smartphones-and-mobile/profiling-ml-on-arm/app-profiling-android-studio.md @@ -7,39 +7,72 @@ layout: learningpathall --- ## Android Memory Profiling -Memory is often a problem in ML, with ever bigger models and data. For profiling an Android app's memory, Android Studio has a built-in profiler. This can be used to monitor the memory usage of your app, and to find memory leaks. +Memory is a common problem in ML, with ever-increasing model parameters and datasets. For profiling an Android app's memory, Android Studio has a built-in profiler. You can use this to monitor the memory usage of your app, and to detect memory leaks. ### Set up the Profiler -To find the Profiler, open your project in Android Studio and click on the *View* menu, then *Tool Windows*, and then *Profiler*. This opens the Profiler window. Attach your device in Developer Mode with a USB cable, and then you should be able to select your app's process. Here there are a number of different profiling tasks available. +* To find the Profiler, open your project in Android Studio, and select the **View** menu. + +* Next, click **Tool Windows**, and then **Profiler**. This opens the Profiler window. + +* Attach your device in Developer Mode with a USB cable, and then select your app's process. There are a number of different profiling tasks available. -Most likely with an Android ML app you'll need to look at memory both from the Java/Kotlin side and the native side. The Java/Kotlin side is where the app runs, and may be where buffers are allocated for input and output if, for example, you're using LiteRT (formerly known as TensorFlow Lite). The native side is where the ML framework will run. Looking at the memory consumption for Java/Kotlin and native is 2 separate tasks in the Profiler: *Track Memory Consumption (Java/Kotlin Allocations)* and *Track Memory Consumption (Native Allocations)*. -Before you start either task, you have to build your app for profiling. The instructions for this and for general profiling setup can be found [here](https://developer.android.com/studio/profile).
You will want to start the correct profiling version of the app depending on the task. +* Next, click **Tool Windows**, and then **Profiler**. This opens the Profiler window. -![Android Studio profiling run types alt-text#center](android-profiling-version.png "Figure 1. Profiling run versions") +* Attach your device in Developer Mode with a USB cable, and then select your app's process. There are a number of different profiling tasks available. -For the Java/Kotlin side, you want the **debuggable** "Profile 'app' with complete data", which is based off the debug variant. For the native side, you want the **profileable** "Profile 'app' with low overhead", which is based off the release variant. +Most likely with an Android ML app you will need to look at memory both from the Java/Kotlin side, and the native side: + +* The Java/Kotlin side is where the app runs, and might be where buffers are allocated for input and output if, for example, you are using LiteRT. +* The native side is where the ML framework runs. + +{{% notice Note %}} +Before you start either task, you must build your app for profiling. The instructions for this, and for general profiling setup, can be found in [Profile your app performance](https://developer.android.com/studio/profile) on the Android Studio website. You need to start the correct profiling version of the app depending on the task. +{{% /notice %}} + +Looking at the memory consumption for Java/Kotlin and native, there are two separate tasks in the Profiler: + +* **Track Memory Consumption (Java/Kotlin Allocations)**. +* **Track Memory Consumption (Native Allocations)**. + +![Android Studio profiling run types alt-text#center](android-profiling-version.png "Figure 3: Profiling Run Versions") + +For the Java/Kotlin side, select **Profile 'app' with complete data**, which is based on the debug variant. For the native side, select the profileable **Profile 'app' with low overhead**, which is based on the release variant. ### Java/Kotlin -If you start looking at the [Java/Kotlin side](https://developer.android.com/studio/profile/record-java-kotlin-allocations), choose *Profiler: Run 'app' as debuggable*, and then select the *Track Memory Consumption (Java/Kotlin Allocations)* task. Navigate to the part of the app you wish to profile and then you can start profiling. At the bottom of the Profiling window it should look like Figure 2 below. Click *Start Profiler Task*. +To investigate the Java/Kotlin side, see the notes on [Record Java/Kotlin allocations](https://developer.android.com/studio/profile/record-java-kotlin-allocations). + +Select **Profiler: Run 'app' as debuggable**, and then select the **Track Memory Consumption (Java/Kotlin Allocations)** task. + +Navigate to the part of the app that you would like to profile, and then you can start profiling. -![Android Studio Start Profile alt-text#center](start-profile-dropdown.png "Figure 2. Start Profile") +The bottom of the profiling window should resemble Figure 4. -When you're ready, *Stop* the profiling again. Now there will be a nice timeline graph of memory usage. While Android Studio has a nicer interface for the Java/Kotlin side than the native side, the key to the timeline graph may be missing. This key is shown below in Figure 3, so you can refer to the colors from this. -![Android Studio memory key alt-text#center](profiler-jk-allocations-legend.png "Figure 3. Memory key for the Java/Kotlin Memory Timeline") +![Android Studio Start Profile alt-text#center](start-profile-dropdown.png "Figure 4: Start Profile") -The default height of the Profiling view, as well as the timeline graph within it is usually too small, so adjust these heights to get a sensible graph. You can click at different points of the graph to see the memory allocations at that time. If you look according to the key you can see how much memory is allocated by Java, Native, Graphics, Code etc. +Click **Start profiler task**. -Looking further down you can see the *Table* of Java/Kotlin allocations for your selected time on the timeline. With ML a lot of your allocations are likely to be byte[] for byte buffers, or possibly int[] for image data, etc. Clicking on the data type will open up the particular allocations, showing their size and when they were allocated. This will help to quickly narrow down their use, and whether they are all needed etc. +When you're ready, select **Stop** to end the profiling. + +Now there will be a timeline graph of memory usage. While Android Studio has a more user-friendly interface for the Java/Kotlin side than the native side, the key to the timeline graph might be missing. This key is shown in Figure 5. + +![Android Studio memory key alt-text#center](profiler-jk-allocations-legend.png "Figure 5: Memory key for the Java/Kotlin Memory Timeline") + +The default height of the profiling view, as well as the timeline graph within it, is usually too small, so adjust these heights to get a readable graph. + +Now click on different points of the graph to see the memory allocations at each specific time. Using the key on the graph, you can see how much memory is allocated by different categories of consumption, such as Java, Native, Graphics, and Code. + +If you look further down, you can see the **Table** of Java/Kotlin allocations for your selected time on the timeline. With ML, many of your allocations are likely to be types such as byte[] for byte buffers, or possibly int[] for image data. Clicking on the data type opens up the particular allocations, showing their size and when they were allocated. This will help to quickly narrow down their use, and whether they are all needed. ### Native -For the [native side](https://developer.android.com/studio/profile/record-native-allocations), the process is similar but with different options. Choose *Profiler: Run 'app' as profileable*, and then select the *Track Memory Consumption (Native Allocations)* task. Here you have to *Start profiler task from: Process Start*. Choose *Stop* once you've captured enough data. +For the [native side](https://developer.android.com/studio/profile/record-native-allocations), the process is similar but with different options. Select **Profiler: Run 'app' as profileable**, and then select the **Track Memory Consumption (Native Allocations)** task. Here you have to **Start profiler task from: Process Start**. Select **Stop** once you've captured enough data. -The Native view doesn't have the same nice timeline graph as the Java/Kotlin side, but it does have the *Table* and *Visualization* tabs. The *Table* tab no longer has a list of allocations, but options to *Arrange by allocation method* or *callstack*. Choose *Arrange by callstack* and then you can trace down which functions were allocating significant memory. Potentially more useful, you can also see Remaining Size.
+The Native view does not provide the same kind of timeline graph as the Java/Kotlin side, but it does have the **Table** and **Visualization** tabs. The **Table** tab no longer has a list of allocations, but options to **Arrange by allocation method** or **callstack**. Select **Arrange by callstack** and then you can trace down which functions allocate significant amounts of memory. You can also see **Remaining Size**, which is arguably more useful. -In the Visualization tab you can see the callstack as a graph, and once again you can look at total Allocations Size or Remaining Size. If you look at Remaining Size, you can see what is still allocated at the end of the profiling, and by looking a few steps up the stack, probably see which allocations are related to the ML model, by seeing functions that relate to the framework you are using. A lot of the memory may be allocated by that framework rather than in your code, and you may not have much control over it, but it is useful to know where the memory is going. +In the **Visualization** tab, you can see the callstack as a graph, and once again you can look at total **Allocations Size** or **Remaining Size**. If you look at **Remaining Size**, you can see what remains allocated at the end of the profiling, and by looking a few steps up the stack, you can probably see which allocations are related to the ML model, by seeing functions that relate to the framework you are using. A lot of the memory may be allocated by that framework rather than in your code, and you may not have much control over it, but it is useful to know where the memory is going. ## Other platforms -On other platforms, you will need a different memory profiler. The objective of working out where the memory is being used is the same, and whether there are issues with leaks or just too much memory being used. There are often trade-offs between memory and speed, and they can be considered more sensibly if the numbers involved are known. +On other platforms, you will need a different memory profiler. The objective is the same: to investigate memory consumption, identifying whether there are issues with leaks or whether too much memory is being used. + +There are often trade-offs between memory and speed, and investigating memory consumption provides data that can help inform assessments of this balance. + + diff --git a/content/learning-paths/smartphones-and-mobile/profiling-ml-on-arm/app-profiling-streamline.md b/content/learning-paths/smartphones-and-mobile/profiling-ml-on-arm/app-profiling-streamline.md index e55e4e172..c72893edb 100644 --- a/content/learning-paths/smartphones-and-mobile/profiling-ml-on-arm/app-profiling-streamline.md +++ b/content/learning-paths/smartphones-and-mobile/profiling-ml-on-arm/app-profiling-streamline.md @@ -7,51 +7,73 @@ layout: learningpathall --- ## Application Profiling -Application profiling can be split into 2 main types - *Instrumentation* and *Sampling*. [Streamline](https://developer.arm.com/Tools%20and%20Software/Streamline%20Performance%20Analyzer), for example, is a sampling profiler, that takes regular samples of various counters and registers in the system to provide a detailed view of the system's performance. Sampling will only provide a statistical view, but it is less intrusive and has less processing overhead than instrumentation.
+Application profiling can be split into two main types: -The profiler can look at memory, CPU activity and cycles, cache misses, and many parts of the GPU as well as other performance metrics. It can also provide a timeline view of these counters to show the application's performance over time. This will show bottlenecks, and help you understand where to focus your optimization efforts. +* Sampling. +* Instrumentation. -![Streamline image alt-text#center](Streamline.png "Figure 1. Streamline timeline view") +[Streamline](https://developer.arm.com/Tools%20and%20Software/Streamline%20Performance%20Analyzer) is an example of a sampling profiler that takes regular samples of various counters and registers in the system to provide a detailed view of the system's performance. -## Example Android Application +Whilst sampling only provides a statistical view, it is less intrusive and has less processing overhead than instrumentation. -In this Learning Path, you will use profile [an example Android application](https://github.com/dawidborycki/Arm.PyTorch.MNIST.Inference) using Streamline. -Start by cloning the repository containing this example on your machine and open it in a recent Android Studio. It is generally safest to not update the Gradle version when prompted. +The profiler looks at performance metrics such as memory, CPU activity and cycles, cache misses, and many parts of the GPU. + +It can also provide a timeline view of these counters to show any changes in the application's performance, which can reveal bottlenecks, and help you to identify where to focus your optimization efforts. + +![Streamline image alt-text#center](Streamline.png "Figure 1. Streamline Timeline View") + +## Get started with an example Android Application + +In this Learning Path, you will profile [an example Android application](https://github.com/dawidborycki/Arm.PyTorch.MNIST.Inference) using Streamline. + +Start by cloning the repository containing this example on your machine, then open it in a recent version of Android Studio. + +{{% notice Note %}} +It is generally safest not to update the Gradle version when prompted. +{{% /notice %}} ## Streamline -You will install Streamline and Performance Studio on your host machine and connect to your target Arm device to capture the data. In this example, the target device is an Arm-powered Android phone. The data is captured over a USB connection, and then analyzed on your host machine. +Now you can install Streamline and Arm Performance Studio on your host machine and connect to your target Arm device to capture the data. + +In this example, the target device is an Arm-powered Android phone. The data is captured over a USB connection, and then analyzed on your host machine. + +For more information on Streamline usage, see [Tutorials and Training Videos](https://developer.arm.com/Tools%20and%20Software/Arm%20Performance%20Studio). -For more details on Streamline usage you can refer to these [tutorials and training videos](https://developer.arm.com/Tools%20and%20Software/Arm%20Performance%20Studio). While the example you are running is based on Android, you can use [the setup and capture instructions for Linux](https://developer.arm.com/documentation/101816/0903/Getting-started-with-Streamline/Profile-your-Linux-application). +While the example that you are running is based on Android, you can also run it on Linux.
See [Setup and Capture Instructions for Linux](https://developer.arm.com/documentation/101816/0903/Getting-started-with-Streamline/Profile-your-Linux-application). -First, follow these [setup instructions](https://developer.arm.com/documentation/102477/0900/Setup-tasks?lang=en), to make sure you have `adb` (Android Debug Bridge) installed. If you have installed [Android Studio](https://developer.android.com/studio), you will have installed adb already. Otherwise, you can get it as part of the Android SDK platform tools [here](https://developer.android.com/studio/releases/platform-tools.html). +### Installation -Make sure `adb` is in your path. You can check this by running `adb` in a terminal. If it is not in your path, you can add it by installing the [Android SDK `platform-tools`](https://developer.android.com/tools/releases/platform-tools#downloads) directory to your path. +Firstly, follow these [Setup Instructions](https://developer.arm.com/documentation/102477/0900/Setup-tasks?lang=en), to make sure you have `adb` (Android Debug Bridge) installed. If you have installed [Android Studio](https://developer.android.com/studio), you will have adb installed already. Otherwise, you can get it as part of the Android SDK platform tools, which can be found on the [SDK Platform Tools Release Notes page](https://developer.android.com/studio/releases/platform-tools.html). + +Make sure `adb` is in your path. You can check this by running `adb` in a terminal. If it is not in your path, add the directory containing the SDK platform tools to your path; you can download them from the [SDK Platform Tools Release Notes Downloads page](https://developer.android.com/tools/releases/platform-tools#downloads). Next, install [Arm Performance Studio](https://developer.arm.com/Tools%20and%20Software/Arm%20Performance%20Studio#Downloads), which includes Streamline. -Connect your Android phone to your host machine through USB. Ensure that your Android phone is set to [Developer mode](https://developer.android.com/studio/debug/dev-options). +Connect your Android phone to your host machine through USB. Ensure that your Android phone is set to developer mode. For more information on how to do this, see [Configure on-device developer options](https://developer.android.com/studio/debug/dev-options). + +On your phone, navigate to **Settings**, then **Developer Options**. Enable **USB Debugging**. If your phone requests authorization for connection to your host machine, confirm authorization. Test the connection by running `adb devices` in a terminal. You will see your device ID listed. -On your phone, go to `Settings > Developer Options` and enable USB Debugging. If your phone asks you to authorize connection to your host machine, confirm this. Test the connection by running `adb devices` in a terminal. You should see your device ID listed. +Next, you need a debuggable build of the application that you want to profile. +- In Android Studio, ensure your **Build Variant** is set to **debug**. You can then build the application, and install it on your device. +- For a Unity app, select **Development Build** in the **Build Settings** menu under **File**, when building your application. +- In Unreal Engine, expand the navigation menu **Project Settings** > **Project** > **Packaging** > **Project**, and ensure that the **For Distribution** checkbox is clear. +- In the general case, you can set `android:debuggable=true` in the application manifest file. -Next, you need a debuggable build of the application you want to profile.
-- In Android Studio, ensure your *Build Variant* is set to `debug`. You can then build the application and install it on your device. -- For a Unity app, select Development Build under File > Build Settings when building your application. -- In Unreal Engine, open Project Settings > Project > Packaging > Project, and ensure that the For Distribution checkbox is not set. -- In the general case, you can set `android:debuggable=true` in the application manifest file. +For the example application that you cloned earlier, the Build Variant is `debug` by default, but you can verify this by going to **Build** > **Select Build Variant** in Android Studio. -For the example application that you cloned earlier, the Build Variant is `debug` by default, but you can verify this by going to `Build > Select Build Variant` in Android Studio. Build and install this application on your device. +Build and install this application on your device. -You can now run Streamline and [capture a profile](https://developer.arm.com/documentation/102477/0900/Capture-a-profile?lang=en) of your application. But before you do, lets add some useful annotations to your code that can help with more specific performance analysis of your application. +You are now able to run Streamline and capture a profile of your application by following the instructions [Capture a profile](https://developer.arm.com/documentation/102477/0900/Capture-a-profile?lang=en). But before you do, you can add some useful annotations to your code that enable more specific performance analysis of your application. ## Custom Annotations -In Streamline, it is possible to add custom annotations to the timeline view. This can be useful to mark the start and end of specific parts of your application, or to mark when a specific event occurs. This can help you understand the performance of your application in relation to these events. At the bottom of *Figure 1* above there are custom annotations to show when inference, pre-processing, and post-processing are happening. +In Streamline, it is possible to add custom annotations to the timeline view. This can be useful to mark the start and end of parts of your application, or to mark when a specific event occurs. This then allows you to view the performance of your application in relation to these events. At the bottom of *Figure 1* there are custom annotations to show when inference, pre-processing, and post-processing occur. -To add annotations, you will need to add some files into your project from the **gator** daemon that Streamline uses. These files are named `streamline_annotate.c`, `streamline_annotate.h` and `streamline_annotate_logging.h` and made available [here](https://github.com/ARM-software/gator/tree/main/annotate). Using these annotations, you will be able to show log strings, markers, counters and Custom Activity Maps. WIthin your example project, create a `cpp` folder under the `app/src/main` folder, and add these three files there. +To add annotations, you will need to add some files into your project from the **gator** daemon that Streamline uses. These files are named `streamline_annotate.c`, `streamline_annotate.h`, and `streamline_annotate_logging.h` and made available at [this GitHub repository](https://github.com/ARM-software/gator/tree/main/annotate). Using these annotations, you can see log strings, markers, counters, and Custom Activity Maps. Within your example project, create a `cpp` folder under the `app/src/main` folder, and add these three files there.
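One way to copy the three annotation files into place is sketched below. This is a minimal, hedged example: the raw GitHub URLs are assumptions based on the repository linked above, so verify them if the repository layout has changed.

```bash
# Minimal sketch: fetch the three gator annotation files into app/src/main/cpp.
# The raw.githubusercontent.com paths are assumptions based on the repository
# linked above -- check the repository if its layout has changed.
mkdir -p app/src/main/cpp
cd app/src/main/cpp
for f in streamline_annotate.c streamline_annotate.h streamline_annotate_logging.h; do
  curl -LO "https://raw.githubusercontent.com/ARM-software/gator/main/annotate/$f"
done
```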
-These files are written in C, so if your Android Studio project is in Java or Kotlin, you will need to add a C library to your project. This is slightly trickier than just adding a Java or Kotlin file, but it is not difficult. You can find instructions on how to do this [here](https://developer.android.com/studio/projects/add-native-code). +These files are written in C, so if your Android Studio project is in Java or Kotlin, you will need to add a C library to your project. This is slightly trickier than adding a Java or Kotlin file, but it is not difficult. You can find instructions on how to do this at a page called [Add C and C++ code to your project](https://developer.android.com/studio/projects/add-native-code). -Create a file in the `app/src/main/cpp/` folder under your project and name it `annotate_jni_wrapper.c`. This will be a wrapper around the gator daemon's functions, and will be called from your Kotlin code. Copy the code below into this file. You can also create very similar wrapper functions for other gator daemon functions. +Create a file in the `app/src/main/cpp/` folder under your project, and name it `annotate_jni_wrapper.c`. This will be a wrapper around the gator daemon's functions, and will be called from your Kotlin code. Copy the code below into this file. You can also create similar wrapper functions for other gator daemon functions. ```c #include <jni.h> @@ -66,7 +88,7 @@ JNIEXPORT jlong JNICALL Java_AnnotateStreamline_GetTime(JNIEnv* env, jobject obj } ``` -Some functions have `unsigned int`, but that needs to be a `jint` in the wrapper, with some casting required in your Kotlin code to enforce type correctness at that end. Some functions have strings as arguments, and you will need to do a small conversion as shown below: +Some functions have `unsigned int`, but this needs to be a `jint` in the wrapper, with some casting required in your Kotlin code to enforce type correctness at that end. Some functions have strings as arguments, and you will need to do a small conversion as shown below: ```c JNIEXPORT void JNICALL Java_AnnotateStreamline_AnnotateMarkerColorStr(JNIEnv* env, jobject obj, jint color, jstring str) { @@ -76,7 +98,7 @@ JNIEXPORT void JNICALL Java_AnnotateStreamline_AnnotateMarkerColorStr(JNIEnv* en } ``` -In Android Studio `cmake` is used to create your C library, so you will need a `CMakelists.txt` file in the same directory as the C files (`app/src/main/cpp/` in the example). Copy the contents shown below into `CMakelists.txt`: +In Android Studio, `cmake` is used to create your C library, so you will need a `CMakeLists.txt` file in the same directory as the C files (`app/src/main/cpp/` in the example). Copy the contents shown below into `CMakeLists.txt`: ```cmake # Sets the minimum CMake version required for this project. @@ -112,7 +134,13 @@ Now add the code below to the `build.gradle` file of the Module you wish to prof } } ``` -This will create a `libStreamlineAnnotationJNI.so` library that you can load in your Kotlin code, and then you can call the functions. Here you will create a singleton `AnnotateStreamline.kt`. Place the file alongside `MainActivity.kt` in `app\src\main\java\com\arm\armpytorchmnistinference` for the example. Add the following code to `AnnotateStreamline.kt` to enable Kotlin calls to the gator daemon from the rest of your code: +This creates a `libStreamlineAnnotationJNI.so` library that you can load in your Kotlin code, and then you can call the functions. + +You can now create a singleton `AnnotateStreamline.kt`.
+ +Place the file alongside `MainActivity.kt` in `app\src\main\java\com\arm\armpytorchmnistinference` for the example. + +Add the following code to `AnnotateStreamline.kt` to enable Kotlin calls to the gator daemon from the rest of your code: ```kotlin // Kotlin wrapper class for integration into Android project @@ -164,23 +192,27 @@ class AnnotateStreamline { Fill in all the function calls to match the functions you added into `annotate_jni_wrapper.c`. -The `AnnotateStreamline` class can now be used in your Kotlin code to add annotations to the Streamline timeline view. The first thing is to make sure `AnnotateStreamline.setup()` is called before any other gator functions. For the example project, add it into the `onCreate()` function of `MainActivity.kt`. Then you can add annotations like this: +You can now use the `AnnotateStreamline` class in your Kotlin code to add annotations to the Streamline timeline view. + +Firstly, make sure that `AnnotateStreamline.setup()` is called before any other gator function. + +For the example project, add it into the `onCreate()` function of `MainActivity.kt`. Then you can add annotations like this: ```kotlin AnnotateStreamline.annotateMarkerColorStr(AnnotateStreamline.ANNOTATE_BLUE, "Model Load") ``` -In the example app you could add this in the `onCreate()` function of `MainActivity.kt` after the `Module.load()` call to load the `model.pth`. +In the example app, you can add this in the `onCreate()` function of `MainActivity.kt` after the `Module.load()` call to load the `model.pth`. -This 'colored marker with a string' annotation will add the string and time to Streamline's log view, and look like the image shown below in Streamline's timeline (in the example app ArmNN isn't used, so there are no white ArmNN markers): +This *colored marker with a string* annotation will add the string and time to Streamline's log view, and it appears like the image shown below in Streamline's timeline (in the example app, Arm NN is not used, so there are no white Arm NN markers): ![Streamline image alt-text#center](streamline_marker.png "Figure 2. Streamline timeline markers") ## Custom Activity Maps (CAMs) -In addition to adding strings to the log and colored markers to the timeline, a particularly useful set of annotations is the Custom Activity Maps. These are the named colored bands you can see at the bottom of the Streamline timeline view shown in *Figure 1*. They can be used to show when specific parts of your application are running, such as the pre-processing or inference, and layered for functions within functions etc. +In addition to adding strings to the log and colored markers to the timeline, a particularly useful set of annotations is the Custom Activity Maps (CAMs). These are the named colored bands that you can see at the bottom of the Streamline timeline view, as shown in *Figure 1*. They can be used to show when specific parts of your application are running, such as the pre-processing or inference, and layered for functions within functions. -To add these you will need to import the functions that start `gator_cam_` from `streamline_annotate.h` through your wrapper files in the same way as the functions above. Then you can use CAMs, but first you will need to set up the tracks the annotations will appear on and an id system for each annotation. The `baseId` code below is to ensure that if you add annotations in multiple places in your code, the ids are unique. 
+To add these, in the same way as the functions above, you need to import the functions that are prefixed with `gator_cam_` from `streamline_annotate.h`. You can then use CAMs, but first you need to set up the tracks the annotations will appear on, and an ID system for each annotation. The `baseId` code below is used to ensure that if you add annotations in multiple places in your code, the IDs are unique. Here is an example setup in a class's companion object: @@ -201,7 +233,7 @@ Here is an example setup in a class's companion object: For the example app, add this to the `MainActivity` class. -Then it can be used like this: +Then you can use it in this way: ```kotlin val preprocess = currentId++ @@ -214,7 +246,7 @@ Then it can be used like this: AnnotateStreamline.camJobEnd(camViewId, preprocess, AnnotateStreamline.getTime()) ``` -In the example app, the CAM annotations are added to the `runInference()` function, which should look like this: +In the example app, the CAM annotations are added to the `runInference()` function, which looks like this: ```kotlin private fun runInference(bitmap: Bitmap) { @@ -244,6 +276,6 @@ In the example app, the CAM annotations are added to the `runInference()` functi } ``` -The example application is very fast and simple, so the CAMs will not show much information. In a more complex application you could add more CAMs, including child-level ones, to give more detailed annotations to show where time is spent in your application. For this example app with its very fast inference, it's best to change the Streamline timeline view scale to 10µs in order to see the CAM annotations better. +The example application is fast and simple, and the CAMs do not show a lot of information. In a more complex application, you can add further CAMs, including child-level ones, to give more detailed annotations to show where time is spent in your application. For this example app with its very fast inference, it is best to change the Streamline timeline view scale to 10µs in order to better see the CAM annotations. -Once you've added in useful CAM annotations, you can build and deploy a debug version of your application. You can run Streamline and see the annotations and CAMs in the timeline view. See the [Streamline documentation](https://developer.arm.com/documentation/101816/latest/) for how to make a capture for profiling. After the capture is made and analyzed, you will be able to see when your application is running the inference, ML pre-processing, ML post-processing, or other parts of your application. From there you can see where the most time is spent, and how hard the CPU or GPU is working during different parts of the application. +Once you have added useful CAM annotations, you can build and deploy a debug version of your application. You can run Streamline and see the annotations and CAMs in the timeline view. See the [Streamline documentation](https://developer.arm.com/documentation/101816/latest/) for information on how to make a capture for profiling. After the capture is made and analyzed, you will be able to see when your application is running the inference, performing ML pre-processing or ML post-processing, or running other parts of your application. From there you can see where the most time is spent, and how hard the CPU or GPU is working during different parts of the application.
From this you can then decide if work is needed to improve performance, and where that work is needed. diff --git a/content/learning-paths/smartphones-and-mobile/profiling-ml-on-arm/nn-profiling-executenetwork.md b/content/learning-paths/smartphones-and-mobile/profiling-ml-on-arm/nn-profiling-executenetwork.md index f4ca26994..1679673b2 100644 --- a/content/learning-paths/smartphones-and-mobile/profiling-ml-on-arm/nn-profiling-executenetwork.md +++ b/content/learning-paths/smartphones-and-mobile/profiling-ml-on-arm/nn-profiling-executenetwork.md @@ -1,21 +1,27 @@ --- -title: ML profiling of a tflite model with ExecuteNetwork +title: ML profiling of a LiteRT model with ExecuteNetwork weight: 6 ### FIXED, DO NOT MODIFY layout: learningpathall --- -## ArmNN's Network Profiler -One way of running tflite models is with ArmNN. This is available as a delegate to the standard tflite interpreter. But to profile the model, ArmNN comes with a command-line utility called `ExecuteNetwork`. This program just runs the model without the rest of the app. It is able to output layer timings and other useful information to let you know where there might be bottlenecks within your model. +## Arm NN Network Profiler +One way of running LiteRT models is to use Arm NN, which is open-source machine learning (ML) inference software. This is available as a delegate to the standard LiteRT interpreter. To profile the model, Arm NN provides a command-line utility called `ExecuteNetwork`. This program runs the model without the rest of the app. It is able to output layer timings and other useful information to report where there might be bottlenecks within your model. -If you are using tflite without ArmNN, then the output from `ExecuteNetwork` will be more of an indication than a definitive answer. But it can still be useful to spot any obvious problems. +If you are using LiteRT without Arm NN, then the output from `ExecuteNetwork` is more of an indication than a definitive answer, but it can still be useful in identifying any obvious problems. -To try this out, you can download a tflite model from the [Arm Model Zoo](https://github.com/ARM-software/ML-zoo). In this Learning Path, you will download [mobilenet tflite](https://github.com/ARM-software/ML-zoo/blob/master/models/image_classification/mobilenet_v2_1.0_224/tflite_int8/mobilenet_v2_1.0_224_INT8.tflite). +### Download a LiteRT Model + +To try this out, you can download a LiteRT model from the [Arm Model Zoo](https://github.com/ARM-software/ML-zoo). Specifically for this Learning Path, you will download [mobilenet tflite](https://github.com/ARM-software/ML-zoo/blob/master/models/image_classification/mobilenet_v2_1.0_224/tflite_int8/mobilenet_v2_1.0_224_INT8.tflite). -To get `ExecuteNetwork` you can download it from the [ArmNN GitHub](https://github.com/ARM-software/armnn/releases). Download the version appropriate for the Android phone you wish to test on - the Android version and the architecture of the phone. If you are unsure of the architecture, you can use a lower one, but you may miss out on some optimizations. Inside the `tar.gz` archive that you download, `ExecuteNetwork` is included. Note among the other release downloads on the ArmNN Github is the separate file for the `aar` delegate which is the easy way to include the ArmNN delegate into your app. -To run `ExecuteNetwork` you'll need to use `adb` to push the model and the executable to your phone, and then run it from the adb shell.
`adb` is included with Android Studio, but you may need to add it to your path. Android Studio normally installs it to a location like `\\AppData\Local\Android\Sdk\platform-tools`. `adb` can also be downloaded separately from the [Android Developer site](https://developer.android.com/studio/releases/platform-tools). +### Download and set up ExecuteNetwork + +You can download `ExecuteNetwork` from the [Arm NN GitHub](https://github.com/ARM-software/armnn/releases). Download the version appropriate for the Android phone that you are testing on, ensuring that it matches the Android version and architecture of the phone. If you are unsure of the architecture, you can use a lower one, but you might miss out on some optimizations. `ExecuteNetwork` is included inside the `tar.gz` archive that you download. Among the other release downloads on the Arm NN GitHub is a separate file for the `aar` delegate, which is the easiest way to include the Arm NN delegate in your app. + +To run `ExecuteNetwork`, you need to use `adb` to push the model and the executable to your phone, and then run it from the adb shell. `adb` is included with Android Studio, but you might need to add it to your path. Android Studio normally installs it to a location such as: + + `\\AppData\Local\Android\Sdk\platform-tools`. `adb` can also be downloaded separately from the [Android Developer site](https://developer.android.com/studio/releases/platform-tools). Unzip the `tar.gz` folder you downloaded. From a command prompt, you can then adapt and run the following commands to push the files to your phone. The `/data/local/tmp` folder of your Android device is a place with relaxed permissions that you can use to run this profiling. @@ -25,9 +31,11 @@ adb push ExecuteNetwork /data/local/tmp/ adb push libarm_compute.so /data/local/tmp/ adb push libarmnn.so /data/local/tmp/ adb push libarmnn_support_library.so /data/local/tmp/ -# more ArmNN .so library files +# more Arm NN .so library files ``` -Push all the `.so` library files that are in the base folder of the `tar.gz` archive you downloaded, alongside `ExecuteNetwork`, and all the `.so` files in the `delegate` sub-folder. If you are using a recent version of Android Studio this copying can be done much more easily with drag and drop in the *Device Explorer > Files*. +Push all the `.so` library files that are in the base folder of the `tar.gz` archive you downloaded, alongside `ExecuteNetwork`, and all the `.so` files in the `delegate` sub-folder. + +If you are using a recent version of Android Studio, this copying can be done much more easily with drag-and-drop in **Device Explorer** > **Files**. Then you need to set the permissions on the files: @@ -38,17 +46,21 @@ chmod 777 ExecuteNetwork chmod 777 *.so ``` -Now you can run ExecuteNetwork to profile the model. With the example tflite, you can use the following command: +### Run ExecuteNetwork to profile the model + +Now you can run ExecuteNetwork to profile the model. With the example LiteRT model, you can use the following command: ```bash LD_LIBRARY_PATH=. ./ExecuteNetwork -m mobilenet_v2_1.0_224_INT8.tflite -c CpuAcc -T delegate --iterations 2 --do-not-print-output --enable-fast-math --fp16-turbo-mode -e --output-network-details > modelout.txt ``` -If you are using your own tflite, replace `mobilenet_v2_1.0_224_INT8.tflite` with the name of your tflite file. +If you are using your own LiteRT model, replace `mobilenet_v2_1.0_224_INT8.tflite` with the name of your model file. + +This runs the model twice, outputting the layer timings to `modelout.txt`.
The `--iterations 2` flag instructs it to run twice: the first run includes a lot of start-up costs and one-off optimizations, whilst the second run is more indicative of the level of performance. -This will run the model twice, outputting the layer timings to `modelout.txt`. The `--iterations 2` flag is the command that means it runs twice: the first run includes a lot of startup costs and one-off optimizations, so the second run is more indicative of the real performance. +The other flags to note are the `-e` and `--output-network-details` flags, which output a lot of timeline information about the model, including the layer timings. The `--do-not-print-output` flag stops the output of the model, which can be very large, and without sensible input it is meaningless. The `--enable-fast-math` and `--fp16-turbo-mode` flags enable some math optimizations. `CpuAcc` is the accelerated CPU backend, and you can replace it with `GpuAcc` for the accelerated GPU backend. -The other flags to note are the `-e` and `--output-network-details` flags which will output a lot of timeline information about the model, including the layer timings. The `--do-not-print-output` flag will stop the output of the model, which can be very large, and without sensible input it is meaningless. The `--enable-fast-math` and `--fp16-turbo-mode` flags enable some math optimizations. `CpuAcc` is the acclerated CPU backend, it can be replaced with `GpuAcc` for the accelerated GPU backend. +### Analyze the output After running the model, you can pull the output file back to your host machine with the following commands: @@ -56,13 +68,13 @@ exit adb pull /data/local/tmp/modelout.txt ``` -Once again, this can be done with drag and drop in Android Studio's *Device Explorer > Files*. +Once again, you can do this with drag-and-drop in Android Studio in **Device Explorer > Files**. -Depending on the size of your model, the output will probably be quite large. You can use a text editor to view the file. The output is in JSON format, so you can use a JSON viewer to make it more readable. Usually some scripting can be used to extract the information you need more easily out of the very raw data in the file. +Depending on the size of your model, the output will probably be quite large. You can use a text editor to view the file. The output is in JSON format, so you can use a JSON viewer to make it more readable. Usually you can use some scripting to extract the information you need more easily out of the raw data in the file. -At the top is the summary, with the setup time and inference time of your 2 runs, which will look something like this: +At the top is the summary, with the setup time and inference time of the two runs, which looks something like this: -```text +```output Info: ArmNN v33.2.0 Info: Initialization time: 7.20 ms. Info: ArmnnSubgraph creation @@ -78,8 +90,13 @@ Info: Execution time: 468.42 ms. Info: Inference time: 468.58 ms ``` -After the summary comes the graph of the model, then the layers and their timings from the second run. At the start of the layers there are a few optimizations and their timings recorded before the network itself. You can skip past the graph and the optimization timings to get to the part that needs analyzing. +After the summary, you will see: + +* The graph of the model. +* The layers and their timings from the second run.
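As a hedged illustration of the scripting mentioned above, the sketch below scans the raw output for per-layer timing labels. The "Wall clock time" label text is an assumption based on the description in this section, so adjust the pattern to match your `modelout.txt`.

```bash
# Minimal sketch: list the lines carrying per-layer timings in the raw
# ExecuteNetwork output, with line numbers so you can jump to the slow layers.
# "Wall clock time" is the label described in this section -- adjust it if
# your Arm NN version formats the JSON differently.
grep -n "Wall clock time" modelout.txt | head -40
```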
+ +At the start of the layers, there are a few optimizations and their timings recorded before the network itself. You can skip past the graph and the optimization timings to get to the part that you need to analyze. -In the mobilenet example output, the graph is from lines 18 to 1629. After this is the optimization timings, which are part of the runtime, but not the network - these go until line 1989. Next there are a few wall clock recordings for the loading of the network, before the first layer "Convolution2dLayer_CreateWorkload_#18" at line 2036. Here is where the layer info that needs analyzing starts. +In the mobilenet example output, the graph is from lines 18 to 1629. After this are the optimization timings, which are part of the runtime, but not the network - these go until line 1989. Next there are a few wall clock recordings for the loading of the network, before the first layer "Convolution2dLayer_CreateWorkload_#18" at line 2036. This is where the layer information that requires analysis starts. -The layers' "Wall clock time" in microseconds shows how long they took to run. These layers and their timings can then be analyzed to see which layers, and which operators, took the most time. +The layers' wall-clock time in microseconds shows you how long each layer took to run. You can then analyze these layers and timings to identify which layers and operators took the most time. diff --git a/content/learning-paths/smartphones-and-mobile/profiling-ml-on-arm/nn-profiling-general.md b/content/learning-paths/smartphones-and-mobile/profiling-ml-on-arm/nn-profiling-general.md index 91a35381f..bf64ce044 100644 --- a/content/learning-paths/smartphones-and-mobile/profiling-ml-on-arm/nn-profiling-general.md +++ b/content/learning-paths/smartphones-and-mobile/profiling-ml-on-arm/nn-profiling-general.md @@ -6,11 +6,13 @@ weight: 5 layout: learningpathall --- -## Profiling your model -App profilers will give you a good overall view of your performance, but often you might want to look inside the model and work out bottlenecks within the network. The network is often the bulk of the time, in which case it will warrant closer analysis. +## Tools that you can use +App profilers provide a good overall view of performance, but you might want to look inside the model and identify bottlenecks within the network. The network is often where the bulk of the time is spent, so it warrants closer analysis. -With general profilers this is hard to do, as there needs to be annotations inside the ML framework code to get the information. It is a large task to write the profiling annotations throughout the framework, so it is easier to use tools from a framework or inference engine that already has the required instrumentation. +With general profilers this is hard to do, as there need to be annotations inside the ML framework code to retrieve the information. It is a complex task to write profiling annotations throughout the framework, so it is easier to use tools from a framework or inference engine that already has the required instrumentation. -Depending on your model, your choice of tools will differ. For example, if you are using LiteRT (formerly TensorFlow Lite), Arm provides the ArmNN delegate that you can run with the model running on Linux or Android, CPU or GPU. ArmNN in turn provides a tool called `ExecuteNetwork` that can run the model and give you layer timings among other useful information. +Depending on the model you use, your choice of tools will vary.
For example, if you are using LiteRT (formerly TensorFlow Lite), Arm provides the Arm NN delegate that you can use when running the model on Linux or Android, on the CPU or GPU. -If you are using PyTorch, you will probably use ExecuTorch the ons-device inference runtime for your Android phone. ExecuTorch has a profiler available alongside it. +Arm NN in turn provides a tool called ExecuteNetwork that can run the model and provide layer timings, amongst other useful information. + +If you are using PyTorch, you will probably use ExecuTorch, which is the on-device inference runtime for your Android phone. ExecuTorch has a profiler available alongside it. diff --git a/content/learning-paths/smartphones-and-mobile/profiling-ml-on-arm/plan.txt b/content/learning-paths/smartphones-and-mobile/profiling-ml-on-arm/plan.txt index 70e766717..6c2926ca3 100644 --- a/content/learning-paths/smartphones-and-mobile/profiling-ml-on-arm/plan.txt +++ b/content/learning-paths/smartphones-and-mobile/profiling-ml-on-arm/plan.txt @@ -13,7 +13,7 @@ here's how to do that... Also Android Profiler, memory example Ml network, it will depend on the inference engine you are using -- here's an example for if you are using ArmNN with TFLite +- here's an example for if you are using Arm NN with TFLite - if you're not using it, it may still have some useful information, but different operators will be used and their performance will be different can see structure with netron or google model explorer to compare operators or different versions of networks may need to use a conversion tool to convert to TFLite (or whatever your inference engine wants) diff --git a/content/learning-paths/smartphones-and-mobile/profiling-ml-on-arm/why-profile.md b/content/learning-paths/smartphones-and-mobile/profiling-ml-on-arm/why-profile.md index 7d688a4ad..b1d4b7035 100644 --- a/content/learning-paths/smartphones-and-mobile/profiling-ml-on-arm/why-profile.md +++ b/content/learning-paths/smartphones-and-mobile/profiling-ml-on-arm/why-profile.md @@ -1,23 +1,22 @@ --- -title: Why do you need to profile your ML application? +title: Why should you profile your ML application? weight: 2 ### FIXED, DO NOT MODIFY layout: learningpathall --- -## Performance -Working out what is taking the time and memory in your application is the first step to getting the performance you want. Profiling can help you identify the bottlenecks in your application and understand how to optimize it. +## Optimizing Performance +A first step towards achieving optimal performance in an ML model is to identify what is consuming the most time and memory in your application. Profiling can help you identify the bottlenecks, and it can offer clues about how to optimize operations. -With Machine Learning (ML) applications, the inference of the Neural Network (NN) itself is often the heaviest part of the application in terms of computation and memory usage. This is not guaranteed however, so it is important to profile the application as a whole to see if pre- or post-processing or other code is an issue. +With Machine Learning (ML) applications, whilst the inference of the Neural Network (NN) is often the heaviest part of the application in terms of computation and memory usage, this is not always the case. It is therefore important to profile the application as a whole to detect other possible issues that can negatively impact performance, such as issues with pre- or post-processing, or in other application code.
-In this Learning Path, you will profile an Android example using TFLite, but most of the steps shown will also work with Linux and cover a wide range of Arm devices. The principles for profiling your application are the same for use with other inference engines and platforms, but the tools are different. +In this Learning Path, you will profile an Android example using LiteRT. Most of the steps are transferable and work with Linux, and you can use them on a wide range of Arm devices. -## Tools +The principles for profiling an application apply to many other inference engines and platforms; only the tools differ. -You will need to use different tools to profile the ML inference or the application's performance running on your Arm device. +{{% notice Note %}} +LiteRT is the new name for TensorFlow Lite, or TFLite. +{{% /notice %}} -For profiling the ML inference, you will use [ArmNN](https://github.com/ARM-software/armnn/releases)'s ExecuteNetwork. - -For profiling the application as a whole, you will use [Arm Performance Studio](https://developer.arm.com/Tools%20and%20Software/Arm%20Performance%20Studio)'s Streamline, and the Android Studio Profiler. diff --git a/content/learning-paths/smartphones-and-mobile/totalcompute/_review.md b/content/learning-paths/smartphones-and-mobile/totalcompute/_review.md index 211a30b6c..b0530c3d8 100644 --- a/content/learning-paths/smartphones-and-mobile/totalcompute/_review.md +++ b/content/learning-paths/smartphones-and-mobile/totalcompute/_review.md @@ -28,10 +28,10 @@ review: - "Trusted firmware" - "Android" - "CMSIS" - - "ArmNN" + - "Arm NN" correct_answer: 3 explanation: > - The stack includes open-source code available from these upstream projects: SCP firmware, Trusted firmware, Linux kernel, Android, and ArmNN. + The stack includes open-source code available from these upstream projects: SCP firmware, Trusted firmware, Linux kernel, Android, and Arm NN. # ================================================================================ diff --git a/content/learning-paths/smartphones-and-mobile/totalcompute/build.md b/content/learning-paths/smartphones-and-mobile/totalcompute/build.md index 0b3a23113..b02a3c462 100644 --- a/content/learning-paths/smartphones-and-mobile/totalcompute/build.md +++ b/content/learning-paths/smartphones-and-mobile/totalcompute/build.md @@ -7,7 +7,7 @@ weight: 2 # 1 is first, 2 is second, etc. # Do not modify these elements layout: "learningpathall" --- -The [Arm Total Compute](https://developer.arm.com/Tools%20and%20Software/Total%20Compute) reference software stack is a fully integrated open-source stack, from firmware up to Android. he stack includes open-source code available from the relevant upstream projects: SCP firmware, Trusted firmware, Linux kernel, Android, and ArmNN. +The [Arm Total Compute](https://developer.arm.com/Tools%20and%20Software/Total%20Compute) reference software stack is a fully integrated open-source stack, from firmware up to Android. The stack includes open-source code available from the relevant upstream projects: SCP firmware, Trusted firmware, Linux kernel, Android, and Arm NN. ## Download and install the FVP