Skip to content

Commit

Permalink
IGNITE-22705 Fix JavaDoc warnings about invalid usage of tag (#11563)
Browse files Browse the repository at this point in the history
  • Loading branch information
chesnokoff authored Oct 3, 2024
1 parent 20160fa commit 6fb76db
Show file tree
Hide file tree
Showing 25 changed files with 62 additions and 61 deletions.
14 changes: 7 additions & 7 deletions modules/core/src/main/java/org/apache/ignite/IgniteCache.java
Original file line number Diff line number Diff line change
Expand Up @@ -188,12 +188,12 @@ public interface IgniteCache<K, V> extends javax.cache.Cache<K, V>, IgniteAsyncS
* <p>
* Full list of repairable methods:
* <ul>
* <li>{@link IgniteCache#containsKey} && {@link IgniteCache#containsKeyAsync}</li>
* <li>{@link IgniteCache#containsKeys} && {@link IgniteCache#containsKeysAsync}</li>
* <li>{@link IgniteCache#getEntry} && {@link IgniteCache#getEntryAsync}</li>
* <li>{@link IgniteCache#getEntries} && {@link IgniteCache#getEntriesAsync}</li>
* <li>{@link IgniteCache#get} && {@link IgniteCache#getAsync}</li>
* <li>{@link IgniteCache#getAll} && {@link IgniteCache#getAllAsync}</li>
* <li>{@link IgniteCache#containsKey} &amp;&amp; {@link IgniteCache#containsKeyAsync}</li>
* <li>{@link IgniteCache#containsKeys} &amp;&amp; {@link IgniteCache#containsKeysAsync}</li>
* <li>{@link IgniteCache#getEntry} &amp;&amp; {@link IgniteCache#getEntryAsync}</li>
* <li>{@link IgniteCache#getEntries} &amp;&amp; {@link IgniteCache#getEntriesAsync}</li>
* <li>{@link IgniteCache#get} &amp;&amp; {@link IgniteCache#getAsync}</li>
* <li>{@link IgniteCache#getAll} &amp;&amp; {@link IgniteCache#getAllAsync}</li>
* </ul>
* @param strategy Read Repair strategy.
* @return Cache with explicit consistency check on each read and repair if necessary.
Expand Down Expand Up @@ -226,7 +226,7 @@ public interface IgniteCache<K, V> extends javax.cache.Cache<K, V>, IgniteAsyncS
* (which will be stored in binary format), you should acquire following projection
* to avoid deserialization:
* <pre>
* IgniteCache<Integer, BinaryObject> prj = cache.withKeepBinary();
* IgniteCache&lt;Integer, BinaryObject&gt; prj = cache.withKeepBinary();
*
* // Value is not deserialized and returned in binary format.
* BinaryObject po = prj.get(1);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -126,7 +126,7 @@ public interface IgniteCluster extends ClusterGroup, IgniteAsyncSupport {
* Returned result is collection of tuples. Each tuple corresponds to one node start attempt and
* contains hostname, success flag and error message if attempt was not successful. Note that
* successful attempt doesn't mean that node was actually started and joined topology. For large
* topologies (> 100s nodes) it can take over 10 minutes for all nodes to start. See individual
* topologies (&gt; 100s nodes) it can take over 10 minutes for all nodes to start. See individual
* node logs for details.
* <p>
* Supports asynchronous execution (see {@link IgniteAsyncSupport}).
Expand Down Expand Up @@ -157,7 +157,7 @@ public Collection<ClusterStartNodeResult> startNodes(File file, boolean restart,
* Completed future contains collection of tuples. Each tuple corresponds to one node start attempt and
* contains hostname, success flag and error message if attempt was not successful. Note that
* successful attempt doesn't mean that node was actually started and joined topology. For large
* topologies (> 100s nodes) it can take over 10 minutes for all nodes to start. See individual
* topologies (&gt; 100s nodes) it can take over 10 minutes for all nodes to start. See individual
* node logs for details.
*
* @param file Configuration file.
Expand Down Expand Up @@ -254,7 +254,7 @@ public IgniteFuture<Collection<ClusterStartNodeResult>> startNodesAsync(File fil
* Returned result is collection of tuples. Each tuple corresponds to one node start attempt and
* contains hostname, success flag and error message if attempt was not successful. Note that
* successful attempt doesn't mean that node was actually started and joined topology. For large
* topologies (> 100s nodes) it can take over 10 minutes for all nodes to start. See individual
* topologies (&gt; 100s nodes) it can take over 10 minutes for all nodes to start. See individual
* node logs for details.
* <p>
* Supports asynchronous execution (see {@link IgniteAsyncSupport}).
Expand Down Expand Up @@ -356,7 +356,7 @@ public Collection<ClusterStartNodeResult> startNodes(Collection<Map<String, Obje
* Completed future contains collection of tuples. Each tuple corresponds to one node start attempt and
* contains hostname, success flag and error message if attempt was not successful. Note that
* successful attempt doesn't mean that node was actually started and joined topology. For large
* topologies (> 100s nodes) it can take over 10 minutes for all nodes to start. See individual
* topologies (&gt; 100s nodes) it can take over 10 minutes for all nodes to start. See individual
* node logs for details.
*
* @param hosts Startup parameters.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -101,15 +101,15 @@ public class IgniteJdbcThinDataSource implements DataSource, Serializable {
}

/**
* Different application servers use different formats (URL & url).
* Different application servers use different formats (URL &amp; url).
* @return Connection URL.
*/
public String getURL() {
// Delegate to getUrl(); this duplicate accessor exists because some application
// servers look up the property under the all-caps name "URL" rather than "url".
return getUrl();
}

/**
* Different application servers use different formats (URL & url).
* Different application servers use different formats (URL &amp; url).
* @param url Connection URL.
* @throws SQLException On error when URL is invalid.
*/
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -1698,7 +1698,7 @@ public final class IgniteSystemProperties {
/**
* Flag to disable memory optimization:
* BitSets instead of HashSets to store partitions.
* When number of backups per partition is > IGNITE_AFFINITY_BACKUPS_THRESHOLD we use HashMap to improve contains()
* When number of backups per partition is &gt; IGNITE_AFFINITY_BACKUPS_THRESHOLD we use HashMap to improve contains()
* which leads to extra memory consumption, otherwise we use view on the
* list of cluster nodes to reduce memory consumption on redundant data structures.
*/
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -44,28 +44,28 @@
*
* <h2 class="header">Java Example</h2>
* <pre name="code" class="java">
* IgniteCache<Integer, String> cache = grid(0).cache(null);
* IgniteCache&lt;Integer, String&gt; cache = grid(0).cache(null);
*
* CacheEntry<String, Integer> entry1 = cache.invoke(100,
* new EntryProcessor<Integer, String, CacheEntry<String, Integer>>() {
* public CacheEntry<String, Integer> process(MutableEntry<Integer, String> entry,
* CacheEntry&lt;String, Integer&gt; entry1 = cache.invoke(100,
* new EntryProcessor&lt;Integer, String, CacheEntry&lt;String, Integer&gt;&gt;() {
* public CacheEntry&lt;String, Integer&gt; process(MutableEntry&lt;Integer, String&gt; entry,
* Object... arguments) throws EntryProcessorException {
* return entry.unwrap(CacheEntry.class);
* }
* });
*
* // Cache entry for the given key may be updated at some point later.
*
* CacheEntry<String, Integer> entry2 = cache.invoke(100,
* new EntryProcessor<Integer, String, CacheEntry<String, Integer>>() {
* public CacheEntry<String, Integer> process(MutableEntry<Integer, String> entry,
* CacheEntry&lt;String, Integer&gt; entry2 = cache.invoke(100,
* new EntryProcessor&lt;Integer, String, CacheEntry&lt;String, Integer&gt;&gt;() {
* public CacheEntry&lt;String, Integer&gt; process(MutableEntry&lt;Integer, String&gt; entry,
* Object... arguments) throws EntryProcessorException {
* return entry.unwrap(CacheEntry.class);
* }
* });
*
* // Comparing entries' versions.
* if (entry1.version().compareTo(entry2.version()) < 0) {
* if (entry1.version().compareTo(entry2.version()) &lt; 0) {
* // the entry has been updated
* }
* </pre>
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -560,7 +560,7 @@ public Map<String, String> getAliases() {

/**
* Sets mapping from full property name in dot notation to an alias that will be used as SQL column name.
* Example: {"parent.name" -> "parentName"}.
* Example: {"parent.name" -&gt; "parentName"}.
*
* @param aliases Aliases map.
* @return {@code this} for chaining.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -66,12 +66,12 @@
* ContinuousQuery&lt;Long, Person&gt; qry = new ContinuousQuery&lt;&gt;();
*
* // Initial iteration query will return all people with salary above 1000.
* qry.setInitialQuery(new ScanQuery&lt;&gt;((id, p) -> p.getSalary() &gt; 1000));
* qry.setInitialQuery(new ScanQuery&lt;&gt;((id, p) -&gt; p.getSalary() &gt; 1000));
*
*
* // Callback that is called locally when update notifications are received.
* // It simply prints out information about all created or modified records.
* qry.setLocalListener((evts) -> {
* qry.setLocalListener((evts) -&gt; {
* for (CacheEntryEvent&lt;? extends Long, ? extends Person&gt; e : evts) {
* Person p = e.getValue();
*
Expand All @@ -80,7 +80,7 @@
* });
*
* // The continuous listener will be notified for people with salary above 1000.
* qry.setRemoteFilter(evt -> evt.getValue().getSalary() &gt; 1000);
* qry.setRemoteFilter(evt -&gt; evt.getValue().getSalary() &gt; 1000);
*
* // Execute the query and get a cursor that iterates through the initial data.
* QueryCursor&lt;Cache.Entry&lt;Long, Person&gt;&gt; cur = cache.query(qry);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -1943,7 +1943,7 @@ protected JdbcTypeField[] valueColumns() {
/**
* Get full table name.
*
* @return &lt;schema&gt;.&lt;table name&gt
* @return &lt;schema&gt;.&lt;table name&gt;
*/
protected String fullTableName() {
return fullTblName;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -827,7 +827,7 @@ public <T> IgniteClientFuture<Map<K, EntryProcessorResult<T>>> invokeAllAsync(
* (which will be stored in binary format), you should acquire following projection
* to avoid deserialization:
* <pre>
* CacheClient<Integer, BinaryObject> prj = cache.withKeepBinary();
* CacheClient&lt;Integer, BinaryObject&gt; prj = cache.withKeepBinary();
*
* // Value is not deserialized and returned in binary format.
* BinaryObject po = prj.get(1);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -39,13 +39,13 @@
* {@link IgniteConfiguration#getUserAttributes()} method to initialize your custom
* node attributes at startup. Here is an example of how to assign an attribute to a node at startup:
* <pre name="code" class="xml">
* &lt;bean class="org.apache.ignite.configuration.IgniteConfiguration">
* &lt;bean class="org.apache.ignite.configuration.IgniteConfiguration"&gt;
* ...
* &lt;property name="userAttributes">
* &lt;map>
* &lt;entry key="worker" value="true"/>
* &lt;/map>
* &lt;/property>
* &lt;property name="userAttributes"&gt;
* &lt;map&gt;
* &lt;entry key="worker" value="true"/&gt;
* &lt;/map&gt;
* &lt;/property&gt;
* ...
* &lt;/bean&gt;
* </pre>
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,7 @@
* protected Collection&lt;? extends ComputeJob&gt; split(int gridSize, final String arg) throws IgniteCheckedException {
* List&lt;ComputeJobAdapter&lt;String&gt;&gt; jobs = new ArrayList&lt;ComputeJobAdapter&lt;String&gt;&gt;(gridSize);
*
* for (int i = 0; i < gridSize; i++) {
* for (int i = 0; i &lt; gridSize; i++) {
* jobs.add(new ComputeJobAdapter() {
* // Job execution logic.
* public Object execute() throws IgniteCheckedException {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -39,10 +39,10 @@
* is transparent to your code and is handled automatically by the adapter.
* Here is an example of how your task will look:
* <pre name="code" class="java">
* public class MyFooBarTask extends ComputeTaskSplitAdapter&lt;String> {
* public class MyFooBarTask extends ComputeTaskSplitAdapter&lt;String&gt; {
* &#64;Override
* protected Collection&lt;? extends ComputeJob> split(int gridSize, String arg) throws IgniteCheckedException {
* List&lt;MyFooBarJob> jobs = new ArrayList&lt;MyFooBarJob>(gridSize);
* protected Collection&lt;? extends ComputeJob&gt; split(int gridSize, String arg) throws IgniteCheckedException {
* List&lt;MyFooBarJob&gt; jobs = new ArrayList&lt;MyFooBarJob&gt;(gridSize);
*
* for (int i = 0; i &lt; gridSize; i++) {
* jobs.add(new MyFooBarJob(arg));
Expand All @@ -61,14 +61,14 @@
* case we manually inject load balancer and use it to pick the best node. Doing it in
* such way would allow user to map some jobs manually and for others use load balancer.
* <pre name="code" class="java">
* public class MyFooBarTask extends ComputeTaskAdapter&lt;String, String> {
* public class MyFooBarTask extends ComputeTaskAdapter&lt;String, String&gt; {
* // Inject load balancer.
* &#64;LoadBalancerResource
* ComputeLoadBalancer balancer;
*
* // Map jobs to grid nodes.
* public Map&lt;? extends ComputeJob, ClusterNode> map(List&lt;ClusterNode> subgrid, String arg) throws IgniteCheckedException {
* Map&lt;MyFooBarJob, ClusterNode> jobs = new HashMap&lt;MyFooBarJob, ClusterNode>(subgrid.size());
* public Map&lt;? extends ComputeJob, ClusterNode&gt; map(List&lt;ClusterNode&gt; subgrid, String arg) throws IgniteCheckedException {
* Map&lt;MyFooBarJob, ClusterNode&gt; jobs = new HashMap&lt;MyFooBarJob, ClusterNode&gt;(subgrid.size());
*
* // In more complex cases, you can actually do
* // more complicated assignments of jobs to nodes.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -48,15 +48,15 @@
* <pre name="code" class="java">
* ...
* // This field will be injected with task continuous mapper.
* &#64TaskContinuousMapperResource
* {@literal @}TaskContinuousMapperResource
* private ComputeTaskContinuousMapper mapper;
* ...
* </pre>
* or from a setter method:
* <pre name="code" class="java">
* // This setter method will be automatically called by the system
* // to set grid task continuous mapper.
* &#64TaskContinuousMapperResource
* {@literal @}TaskContinuousMapperResource
* void setSession(ComputeTaskContinuousMapper mapper) {
* this.mapper = mapper;
* }
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -85,15 +85,15 @@
* <pre name="code" class="java">
* ...
* // This field will be injected with distributed task session.
* &#64TaskSessionResource
* {@literal @}TaskSessionResource
* private ComputeTaskSession ses;
* ...
* </pre>
* or from a setter method:
* <pre name="code" class="java">
* // This setter method will be automatically called by the system
* // to set grid task session.
* &#64TaskSessionResource
* {@literal @}TaskSessionResource
* void setSession(ComputeTaskSession ses) {
* this.ses = ses;
* }
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -45,7 +45,7 @@
* &lt;!-- User version. --&gt;
* &lt;bean id="userVersion" class="java.lang.String"&gt;
* &lt;constructor-arg value="0"/&gt;
* &lt;/bean>
* &lt;/bean&gt;
* </pre>
* By default, all ignite startup scripts ({@code ignite.sh} or {@code ignite.bat})
* pick up user version from {@code IGNITE_HOME/config/userversion} folder. Usually, it
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -158,18 +158,18 @@ public SqlConfiguration setLongQueryWarningTimeout(long longQryWarnTimeout) {
}

/**
* Is key & value validation enabled.
* Is key &amp; value validation enabled.
*
* @return {@code true} When key & value shall be validated against SQL schema.
* @return {@code true} When key &amp; value shall be validated against SQL schema.
*/
public boolean isValidationEnabled() {
// Simple accessor for the key/value-vs-SQL-schema validation flag;
// see the Javadoc above for the contract (default is false per the setter doc).
return validationEnabled;
}

/**
* Enable/disable key & value validation.
* Enable/disable key &amp; value validation.
*
* @param validationEnabled {@code true} When key & value shall be validated against SQL schema.
* @param validationEnabled {@code true} When key &amp; value shall be validated against SQL schema.
* Default value is {@code false}.
* @return {@code this} for chaining.
*/
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -85,7 +85,7 @@
*
* // Callback that is called locally when update notifications are received.
* // It simply prints out information about all created persons.
* qry.setLocalListener((evts) -> {
* qry.setLocalListener((evts) -&gt; {
* for (CacheEntryEvent&lt;? extends Long, ? extends Person&gt; e : evts) {
* Person p = e.getValue();
*
Expand All @@ -94,7 +94,7 @@
* });
*
* // Sets remote filter.
* qry.setRemoteFilterFactory(() -> new ExampleCacheEntryFilter());
* qry.setRemoteFilterFactory(() -&gt; new ExampleCacheEntryFilter());
*
* // Execute query.
* QueryCursor&lt;Cache.Entry&lt;Long, Person&gt;&gt; cur = cache.query(qry);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@
*/
public interface MarshallerContext {
/**
* Method to register typeId->class name mapping in marshaller context <b>cluster-wide</b>.
* Method to register typeId-&gt;class name mapping in marshaller context &lt;b&gt;cluster-wide&lt;/b&gt;.
*
* This method <b>guarantees</b> that mapping is delivered to all nodes in cluster
* and blocks caller thread until then.
Expand Down Expand Up @@ -68,7 +68,7 @@ public boolean registerClassName(
) throws IgniteCheckedException;

/**
* Method to register typeId->class name mapping in marshaller context <b>on local node only</b>.
* Method to register typeId-&gt;class name mapping in marshaller context &lt;b&gt;on local node only&lt;/b&gt;.
*
* <b>No guarantees</b> that the mapping is presented on other nodes are provided.
*
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@
*/

/**
* Contains entry-point <b>Ignite & HPC APIs.</b>
* Contains entry-point <b>Ignite &amp; HPC APIs.</b>
*/

package org.apache.ignite;
Original file line number Diff line number Diff line change
Expand Up @@ -34,15 +34,15 @@
* <p>
* Usage example:
* <pre name="code" class="java">
* ServiceCallInterceptor security = (mtd, args, ctx, svcCall) -> {
* ServiceCallInterceptor security = (mtd, args, ctx, svcCall) -&gt; {
* if (!CustomSecurityProvider.get().access(mtd, ctx.currentCallContext().attribute("sessionId")))
* throw new SecurityException("Method invocation is not permitted");
*
* // Execute remaining interceptors and service method.
* return svcCall.call();
* };
*
* ServiceCallInterceptor audit = (mtd, args, ctx, svcCall) -> {
* ServiceCallInterceptor audit = (mtd, args, ctx, svcCall) -&gt; {
* String sessionId = ctx.currentCallContext().attribute("sessionId");
* AuditProvider prov = AuditProvider.get();
*
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -123,7 +123,7 @@
* if (useAvg) {
* double load = metrics.getAverageActiveJobs() + metrics.getAverageWaitingJobs();
*
* if (load > 0) {
* if (load &gt; 0) {
* return load;
* }
* }
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -52,7 +52,7 @@
* if (useAvg) {
* double load = metrics.getAverageActiveJobs() + metrics.getAverageWaitingJobs();
*
* if (load > 0) {
* if (load &gt; 0) {
* return load;
* }
* }
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -107,7 +107,7 @@
* Integer v1 = cache.get("k1");
*
* // Check if v1 satisfies some condition before doing a put.
* if (v1 != null && v1 > 0)
* if (v1 != null &amp;&amp; v1 &gt; 0)
* cache.put("k1", 2);
*
* cache.remove("k2");
Expand Down
Loading

0 comments on commit 6fb76db

Please sign in to comment.