IDEMPIERE-5816 Upgrade Hazelcast version (FHCA-4436) (#1955)

* IDEMPIERE-5816 Upgrade Hazelcast version (FHCA-4436)

* - fix on discovery of service using UUID
This commit is contained in:
Carlos Ruiz 2023-07-29 02:34:20 +02:00 committed by GitHub
parent 1eef599bb2
commit c85cc9fe7a
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
9 changed files with 317 additions and 155 deletions

View File

@@ -1,6 +1,6 @@
<?xml version="1.0" encoding="UTF-8"?> <?xml version="1.0" encoding="UTF-8"?>
<!-- <!--
~ Copyright (c) 2008-2020, Hazelcast, Inc. All Rights Reserved. ~ Copyright (c) 2008-2023, Hazelcast, Inc. All Rights Reserved.
~ ~
~ Licensed under the Apache License, Version 2.0 (the "License"); ~ Licensed under the Apache License, Version 2.0 (the "License");
~ you may not use this file except in compliance with the License. ~ you may not use this file except in compliance with the License.
@@ -21,26 +21,43 @@
This XML file is used when no hazelcast.xml is present. This XML file is used when no hazelcast.xml is present.
To learn how to configure Hazelcast, please see the schema at To learn how to configure Hazelcast, please see the schema at
https://hazelcast.com/schema/config/hazelcast-config-3.12.xsd https://hazelcast.com/schema/config/hazelcast-config-5.3.xsd
or the Reference Manual at https://hazelcast.org/documentation/ or the Reference Manual at https://docs.hazelcast.com/
--> -->
<!--suppress XmlDefaultAttributeValue --> <!--suppress XmlDefaultAttributeValue -->
<hazelcast xmlns="http://www.hazelcast.com/schema/config" <hazelcast xmlns="http://www.hazelcast.com/schema/config"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.hazelcast.com/schema/config xsi:schemaLocation="http://www.hazelcast.com/schema/config
http://www.hazelcast.com/schema/config/hazelcast-config-3.12.xsd"> http://www.hazelcast.com/schema/config/hazelcast-config-5.3.xsd">
<!--
The name of the cluster. All members of a single cluster must have the same cluster name
configured and a client connecting to this cluster must use it as well.
-->
<cluster-name>@ADEMPIERE_DB_USER@.@ADEMPIERE_DB_NAME@/@ADEMPIERE_DB_SERVER@:@ADEMPIERE_DB_PORT@</cluster-name>
<group>
<name>@ADEMPIERE_DB_USER@.@ADEMPIERE_DB_NAME@/@ADEMPIERE_DB_SERVER@:@ADEMPIERE_DB_PORT@</name>
<!-- https://github.com/hazelcast/hazelcast/issues/11667 -->
</group>
<management-center enabled="false">http://localhost:8080/hazelcast-mancenter</management-center>
<!-- https://docs.hazelcast.org/docs/3.11/manual/html-single/index.html#time-window-for-split-brain-protection -->
<properties> <properties>
<property name="hazelcast.discovery.enabled">false</property> <property name="hazelcast.discovery.enabled">false</property>
</properties> </properties>
<network> <network>
<!--
The preferred port number where the Hazelcast instance will listen. The convention is
to use 5701 and it is the default both here and in various tools connecting to
Hazelcast. This configuration has the following attributes:
- port-count:
The default value is 100, meaning that Hazelcast will try to bind 100 ports.
If you set the value of port as 5701, as members join the cluster, Hazelcast tries
to find ports between 5701 and 5801. You can change the port count in cases like
having large instances on a single machine or you are willing to have only a few
ports assigned.
- auto-increment:
Default value is true. If port is set to 5701, Hazelcast will try to find free
ports between 5701 and 5801. Normally, you will not need to change this value, but
it comes in handy when needed. You may also want to choose to use only one port.
In that case, you can disable the auto-increment feature of port by setting its
value as false.
-->
<port auto-increment="true" port-count="100">5701</port> <port auto-increment="true" port-count="100">5701</port>
<outbound-ports> <outbound-ports>
<!-- <!--
@@ -49,48 +66,79 @@
--> -->
<ports>0</ports> <ports>0</ports>
</outbound-ports> </outbound-ports>
<!--
This configuration lets you choose a discovery mechanism that Hazelcast will use to
form a cluster. Hazelcast can find members by multicast, TCP/IP lists and by various
discovery mechanisms provided by different cloud APIs.
-->
<join> <join>
<!--
Configuration for the Discovery Strategy Auto Detection. When it's enabled, it will
walk through all available discovery strategies and detect the correct one for the
current runtime environment.
-->
<auto-detection enabled="true"/>
<!-- Enables/disables the multicast discovery mechanism. The default value is disabled. -->
<multicast enabled="false"> <multicast enabled="false">
<!--
Specifies the multicast group IP address when you want to create clusters
within the same network. Its default value is 224.2.2.3.
-->
<multicast-group>224.2.2.3</multicast-group> <multicast-group>224.2.2.3</multicast-group>
<!--
Specifies the multicast socket port that the Hazelcast member listens to and
sends discovery messages through. Its default value is 54327.
-->
<multicast-port>54327</multicast-port> <multicast-port>54327</multicast-port>
</multicast> </multicast>
<!-- Specifies whether the TCP/IP discovery is enabled or not. Default value is false. -->
<tcp-ip enabled="false"> <tcp-ip enabled="false">
<interface>127.0.0.1</interface> <interface>127.0.0.1</interface>
<member-list> <member-list>
<member>127.0.0.1</member> <member>127.0.0.1</member>
</member-list> </member-list>
</tcp-ip> </tcp-ip>
<!--
Specifies whether the member use the AWS API to get a list of candidate IPs to
check. "access-key" and "secret-key" are needed to access the AWS APIs and the
rest of the parameters work as filtering criteria that narrow down the list of
IPs to check. Default value is false.
-->
<aws enabled="false"> <aws enabled="false">
<access-key>my-access-key</access-key>
<secret-key>my-secret-key</secret-key>
<!--optional, default is us-east-1 -->
<region>us-west-1</region>
<!--optional, default is ec2.amazonaws.com. If set, region shouldn't be set as it will override this property -->
<host-header>ec2.amazonaws.com</host-header>
<!-- optional, only instances belonging to this group will be discovered, default will try all running instances -->
<security-group-name>hazelcast-sg</security-group-name>
<tag-key>type</tag-key>
<tag-value>hz-nodes</tag-value>
</aws> </aws>
<!--
Specifies whether the member use the GCP APIs to get a list of candidate IPs to
check.
-->
<gcp enabled="false"> <gcp enabled="false">
<zones>us-east1-b,us-east1-c</zones>
</gcp> </gcp>
<!-- Specifies whether the member use the Azure REST API to get a list of candidate IPs to
check.
-->
<azure enabled="false"> <azure enabled="false">
<client-id>CLIENT_ID</client-id>
<client-secret>CLIENT_SECRET</client-secret>
<tenant-id>TENANT_ID</tenant-id>
<subscription-id>SUB_ID</subscription-id>
<cluster-id>HZLCAST001</cluster-id>
<group-name>GROUP-NAME</group-name>
</azure> </azure>
<!--
Specifies whether the member use the Kubernetes APIs to get a list of candidate IPs to
check.
-->
<kubernetes enabled="false"> <kubernetes enabled="false">
<namespace>MY-KUBERNETES-NAMESPACE</namespace>
<service-name>MY-SERVICE-NAME</service-name>
<service-label-name>MY-SERVICE-LABEL-NAME</service-label-name>
<service-label-value>MY-SERVICE-LABEL-VALUE</service-label-value>
</kubernetes> </kubernetes>
<!--
Specifies whether the member use the Eureka Service Registry to get a list of candidate
IPs to check.
-->
<eureka enabled="false"> <eureka enabled="false">
<!--
Defines if the Eureka Discovery SPI plugin will register itself with the Eureka 1
service discovery. It is optional. Default value is true.
-->
<self-registration>true</self-registration> <self-registration>true</self-registration>
<!--
Defines an eureka namespace to not collide with other service registry clients
in eureka-client.properties file. It is optional. Default value is hazelcast.
-->
<namespace>hazelcast</namespace> <namespace>hazelcast</namespace>
</eureka> </eureka>
<discovery-strategies> <discovery-strategies>
@@ -110,11 +158,45 @@
</discovery-strategy> </discovery-strategy>
</discovery-strategies> </discovery-strategies>
</join> </join>
<!--
Specifies which network interfaces Hazelcast should use. You need to set its "enabled"
attribute to true to be able to use your defined interfaces. You can define multiple
interfaces using its <interface> sub-element. By default, it is disabled.
-->
<interfaces enabled="false"> <interfaces enabled="false">
<interface>10.10.1.*</interface> <interface>10.10.1.*</interface>
</interfaces> </interfaces>
<!--
Lets you configure SSL using the SSL context factory. This feature is available
only in Hazelcast Enterprise. To be able to use it, encryption should NOT be enabled
and you should first implement your SSLContextFactory class. Its configuration contains
the factory class and SSL properties. By default, it is disabled.
-->
<ssl enabled="false"/> <ssl enabled="false"/>
<!--
Lets you add custom hooks to join and perform connection procedures (like a custom
authentication negotiation protocol, etc.). This feature is available only in Hazelcast
Enterprise. To be able to use it, you should first implement the MemberSocketInterceptor
(for members joining to a cluster) or SocketInterceptor (for clients connecting to a
member) class. Its configuration contains the class you implemented and socket
interceptor properties. By default, it is disabled. The following is an example:
<socket-interceptor enabled="true">
<class-name>
com.hazelcast.examples.MySocketInterceptor
</class-name>
<properties>
<property name="property1">value1</property>
<property name="property2">value2</property>
</properties>
</socket-interceptor>
-->
<socket-interceptor enabled="false"/> <socket-interceptor enabled="false"/>
<!--
Lets you encrypt the entire socket level communication among all Hazelcast members.
This feature is available only in Hazelcast Enterprise. Its configuration contains
the encryption properties and the same configuration must be placed to all members.
By default, it is disabled.
-->
<symmetric-encryption enabled="false"> <symmetric-encryption enabled="false">
<!-- <!--
encryption algorithm such as encryption algorithm such as
@@ -138,10 +220,24 @@
</network> </network>
<partition-group enabled="false"/> <partition-group enabled="false"/>
<executor-service name="default"> <executor-service name="default">
<pool-size>16</pool-size>
<!--Queue capacity. 0 means Integer.MAX_VALUE.--> <!--Queue capacity. 0 means Integer.MAX_VALUE.-->
<queue-capacity>0</queue-capacity> <queue-capacity>0</queue-capacity>
<pool-size>16</pool-size>
<statistics-enabled>true</statistics-enabled>
</executor-service> </executor-service>
<durable-executor-service name="default">
<capacity>100</capacity>
<durability>1</durability>
<pool-size>16</pool-size>
<statistics-enabled>true</statistics-enabled>
</durable-executor-service>
<scheduled-executor-service name="default">
<capacity>100</capacity>
<durability>1</durability>
<pool-size>16</pool-size>
<merge-policy batch-size="100">com.hazelcast.spi.merge.PutIfAbsentMergePolicy</merge-policy>
<statistics-enabled>true</statistics-enabled>
</scheduled-executor-service>
<security> <security>
<client-block-unmapped-actions>true</client-block-unmapped-actions> <client-block-unmapped-actions>true</client-block-unmapped-actions>
</security> </security>
@@ -165,11 +261,27 @@
Number of async backups. 0 means no backup. Number of async backups. 0 means no backup.
--> -->
<async-backup-count>0</async-backup-count> <async-backup-count>0</async-backup-count>
<!--
Used to purge unused or empty queues. If you define a value (time in seconds)
for this element, then your queue will be destroyed if it stays empty or
unused for that time.
-->
<empty-queue-ttl>-1</empty-queue-ttl> <empty-queue-ttl>-1</empty-queue-ttl>
<merge-policy batch-size="100">com.hazelcast.spi.merge.PutIfAbsentMergePolicy</merge-policy> <merge-policy batch-size="100">com.hazelcast.spi.merge.PutIfAbsentMergePolicy</merge-policy>
</queue> </queue>
<!--
Configuration for a device, which a tiered-store can reference and use for its disk-tier.
-->
<local-device name="default-tiered-store-device">
<base-dir>tiered-store</base-dir>
<capacity unit="GIGABYTES" value="256"/>
<block-size>4096</block-size>
<read-io-thread-count>4</read-io-thread-count>
<write-io-thread-count>4</write-io-thread-count>
</local-device>
<map name="default"> <map name="default">
<!-- <!--
Data type that will be used for storing recordMap. Data type that will be used for storing recordMap.
@@ -204,35 +316,8 @@
Any integer between 0 and Integer.MAX_VALUE. 0 means infinite. Default is 0. Any integer between 0 and Integer.MAX_VALUE. 0 means infinite. Default is 0.
--> -->
<max-idle-seconds>1800</max-idle-seconds> <max-idle-seconds>1800</max-idle-seconds>
<!--
Valid values are:
NONE (no eviction),
LRU (Least Recently Used),
LFU (Least Frequently Used).
NONE is the default.
-->
<eviction-policy>LRU</eviction-policy>
<!--
Maximum size of the map. When max size is reached,
map is evicted based on the policy defined.
Any integer between 0 and Integer.MAX_VALUE. 0 means
Integer.MAX_VALUE. Default is 0.
-->
<max-size policy="PER_NODE">1000</max-size>
<!--
`eviction-percentage` property is deprecated and will be ignored when it is set.
As of version 3.7, eviction mechanism changed. <eviction eviction-policy="LRU" max-size-policy="PER_NODE" size="1000"/>
It uses a probabilistic algorithm based on sampling. Please see documentation for further details
-->
<eviction-percentage>25</eviction-percentage>
<!--
`min-eviction-check-millis` property is deprecated and will be ignored when it is set.
As of version 3.7, eviction mechanism changed.
It uses a probabilistic algorithm based on sampling. Please see documentation for further details
-->
<min-eviction-check-millis>100</min-eviction-check-millis>
<!-- <!--
While recovering from split-brain (network partitioning), While recovering from split-brain (network partitioning),
map entries in the small cluster will merge into the bigger cluster map entries in the small cluster will merge into the bigger cluster
@@ -243,10 +328,10 @@
the policy set here. Default policy is PutIfAbsentMapMergePolicy the policy set here. Default policy is PutIfAbsentMapMergePolicy
There are built-in merge policies such as There are built-in merge policies such as
com.hazelcast.map.merge.PassThroughMergePolicy; entry will be overwritten if merging entry exists for the key. com.hazelcast.spi.merge.PassThroughMergePolicy; entry will be overwritten if merging entry exists for the key.
com.hazelcast.map.merge.PutIfAbsentMapMergePolicy ; entry will be added if the merging entry doesn't exist in the cluster. com.hazelcast.spi.merge.PutIfAbsentMergePolicy ; entry will be added if the merging entry doesn't exist in the cluster.
com.hazelcast.map.merge.HigherHitsMapMergePolicy ; entry with the higher hits wins. com.hazelcast.spi.merge.HigherHitsMergePolicy ; entry with the higher hits wins.
com.hazelcast.map.merge.LatestUpdateMapMergePolicy ; entry with the latest update wins. com.hazelcast.spi.merge.LatestUpdateMergePolicy ; entry with the latest update wins.
--> -->
<merge-policy batch-size="100">com.hazelcast.spi.merge.PutIfAbsentMergePolicy</merge-policy> <merge-policy batch-size="100">com.hazelcast.spi.merge.PutIfAbsentMergePolicy</merge-policy>
@@ -259,40 +344,47 @@
--> -->
<cache-deserialized-values>INDEX-ONLY</cache-deserialized-values> <cache-deserialized-values>INDEX-ONLY</cache-deserialized-values>
<!--
Whether map level statistical information (total
hits, memory-cost etc.) should be gathered and stored.
-->
<statistics-enabled>true</statistics-enabled>
<!--
Whether statistical information (hits, creation
time, last access time etc.) should be gathered
and stored. You have to enable this if you plan to
implement a custom eviction policy, out-of-the-box
eviction policies work regardless of this setting.
-->
<per-entry-stats-enabled>false</per-entry-stats-enabled>
<!--
Tiered Store configuration. By default, it is disabled.
-->
<tiered-store enabled="false">
<memory-tier>
<!--
The amount of memory to be reserved for the memory-tier of the tiered-store instance
of this map.
-->
<capacity unit="MEGABYTES" value="256"/>
</memory-tier>
<!--
Whether disk-tier is enabled, and the name of the device to be used for the disk-tier
of the tiered-store instance of this map.
-->
<disk-tier enabled="false" device-name="default-tiered-store-device"/>
</tiered-store>
<near-cache name="localNearCache"> <near-cache name="localNearCache">
<in-memory-format>OBJECT</in-memory-format> <in-memory-format>OBJECT</in-memory-format>
<cache-local-entries>true</cache-local-entries> <cache-local-entries>true</cache-local-entries>
</near-cache> </near-cache>
</map> </map>
<!--
Configuration for an event journal. The event journal keeps events related
to a specific partition and data structure. For instance, it could keep
map add, update, remove, merge events along with the key, old value, new value and so on.
-->
<event-journal enabled="false">
<mapName>mapName</mapName>
<capacity>10000</capacity>
<time-to-live-seconds>0</time-to-live-seconds>
</event-journal>
<event-journal enabled="false">
<cacheName>cacheName</cacheName>
<capacity>10000</capacity>
<time-to-live-seconds>0</time-to-live-seconds>
</event-journal>
<!--
Configuration for a merkle tree.
The merkle tree is a data structure used for efficient comparison of the
difference in the contents of large data structures. The precision of
such a comparison mechanism is defined by the depth of the merkle tree.
-->
<merkle-tree enabled="false">
<mapName>mapName</mapName>
<depth>10</depth>
</merkle-tree>
<multimap name="default"> <multimap name="default">
<backup-count>1</backup-count> <backup-count>1</backup-count>
<value-collection-type>SET</value-collection-type> <value-collection-type>SET</value-collection-type>
@@ -316,22 +408,6 @@
<merge-policy batch-size="100">com.hazelcast.spi.merge.PutIfAbsentMergePolicy</merge-policy> <merge-policy batch-size="100">com.hazelcast.spi.merge.PutIfAbsentMergePolicy</merge-policy>
</set> </set>
<jobtracker name="default">
<max-thread-size>0</max-thread-size>
<!-- Queue size 0 means number of partitions * 2 -->
<queue-size>0</queue-size>
<retry-count>0</retry-count>
<chunk-size>1000</chunk-size>
<communicate-stats>true</communicate-stats>
<topology-changed-strategy>CANCEL_RUNNING_OPERATION</topology-changed-strategy>
</jobtracker>
<semaphore name="default">
<initial-permits>0</initial-permits>
<backup-count>1</backup-count>
<async-backup-count>0</async-backup-count>
</semaphore>
<reliable-topic name="default"> <reliable-topic name="default">
<read-batch-size>10</read-batch-size> <read-batch-size>10</read-batch-size>
<topic-overload-policy>BLOCK</topic-overload-policy> <topic-overload-policy>BLOCK</topic-overload-policy>
@@ -348,29 +424,58 @@
</ringbuffer> </ringbuffer>
<flake-id-generator name="default"> <flake-id-generator name="default">
<!--
The number of IDs are pre-fetched on the background when one call to
FlakeIdGenerator#newId() is made.
-->
<prefetch-count>100</prefetch-count> <prefetch-count>100</prefetch-count>
<!--
The validity timeout in ms for how long the pre-fetched IDs can be used. If this
time elapses, a new batch of IDs will be fetched. The generated IDs contain timestamp
component, which ensures rough global ordering of IDs. If an ID is assigned to an
object that was created much later, it will be much out of order. If you don't care
about ordering, set this value to 0. This setting pertains only to newId() calls made
on the member that configured it.
-->
<prefetch-validity-millis>600000</prefetch-validity-millis> <prefetch-validity-millis>600000</prefetch-validity-millis>
<id-offset>0</id-offset> <!--
The offset for the timestamp component in milliseconds. The default value corresponds
to the beginning of 2018, (1.1.2018 0:00 UTC). You can adjust the value to determine
the lifespan of the generator.
-->
<epoch-start>1514764800000</epoch-start>
<!--
The offset that will be added to the node ID assigned to cluster member for this generator.
Might be useful in A/B deployment scenarios where you have cluster A which you want to upgrade.
You create cluster B and for some time both will generate IDs and you want to have them unique.
In this case, configure node ID offset for generators on cluster B.
-->
<node-id-offset>0</node-id-offset> <node-id-offset>0</node-id-offset>
<!--
The bit-length of the sequence component of this flake id generator. This configuration
is limiting factor for the maximum rate at which IDs can be generated. Default is 6 bits.
-->
<bits-sequence>6</bits-sequence>
<!-- The bit-length of node id component of this flake id generator. Default value is 16 bits. -->
<bits-node-id>16</bits-node-id>
<!--
Sets how far to the future is the generator allowed to go to generate IDs without blocking,
default is 15 seconds.
-->
<allowed-future-millis>15000</allowed-future-millis>
<!-- Enables/disables statistics gathering for the flake-id generator on this member. -->
<statistics-enabled>true</statistics-enabled> <statistics-enabled>true</statistics-enabled>
</flake-id-generator> </flake-id-generator>
<atomic-long name="default"> <!--
<merge-policy batch-size="100">com.hazelcast.spi.merge.PutIfAbsentMergePolicy</merge-policy> The version of the portable serialization. Portable version is used to differentiate two same
</atomic-long> classes that have changes on it like adding/removing field or changing a type of a field.
-->
<atomic-reference name="default">
<merge-policy batch-size="100">com.hazelcast.spi.merge.PutIfAbsentMergePolicy</merge-policy>
</atomic-reference>
<count-down-latch name="default"/>
<serialization> <serialization>
<portable-version>0</portable-version> <portable-version>0</portable-version>
</serialization> </serialization>
<services enable-defaults="true"/> <!-- Enables a Hazelcast member to be a lite member -->
<lite-member enabled="false"/> <lite-member enabled="false"/>
<cardinality-estimator name="default"> <cardinality-estimator name="default">
@@ -379,13 +484,6 @@
<merge-policy batch-size="100">HyperLogLogMergePolicy</merge-policy> <merge-policy batch-size="100">HyperLogLogMergePolicy</merge-policy>
</cardinality-estimator> </cardinality-estimator>
<scheduled-executor-service name="default">
<capacity>100</capacity>
<durability>1</durability>
<pool-size>16</pool-size>
<merge-policy batch-size="100">com.hazelcast.spi.merge.PutIfAbsentMergePolicy</merge-policy>
</scheduled-executor-service>
<crdt-replication> <crdt-replication>
<replication-period-millis>1000</replication-period-millis> <replication-period-millis>1000</replication-period-millis>
<max-concurrent-replication-targets>1</max-concurrent-replication-targets> <max-concurrent-replication-targets>1</max-concurrent-replication-targets>
@@ -395,4 +493,68 @@
<replica-count>2147483647</replica-count> <replica-count>2147483647</replica-count>
<statistics-enabled>true</statistics-enabled> <statistics-enabled>true</statistics-enabled>
</pn-counter> </pn-counter>
<metrics enabled="true">
<management-center enabled="true">
<retention-seconds>5</retention-seconds>
</management-center>
<jmx enabled="true"/>
<collection-frequency-seconds>5</collection-frequency-seconds>
</metrics>
<sql>
<statement-timeout-millis>0</statement-timeout-millis>
<catalog-persistence-enabled>false</catalog-persistence-enabled>
</sql>
<jet enabled="false" resource-upload-enabled="false">
<!-- time spacing of flow-control (ack) packets -->
<flow-control-period>100</flow-control-period>
<!-- number of backup copies to configure for Hazelcast IMaps used internally in a Jet job -->
<backup-count>1</backup-count>
<!-- the delay after which auto-scaled jobs will restart if a new member is added to the
cluster. The default is 10 seconds. Has no effect on jobs with auto scaling disabled -->
<scale-up-delay-millis>10000</scale-up-delay-millis>
<!-- Sets whether Lossless Job Restart is enabled for the node. With
Lossless Restart, Jet persists job snapshots to disk automatically
and you can restart the whole cluster without losing the jobs and
their state.
This feature requires Hazelcast Enterprise and is implemented on top of the
Persistence feature. Therefore you should enable and configure Persistence,
especially the base directory where to store the recovery files.
-->
<lossless-restart-enabled>false</lossless-restart-enabled>
<!-- Sets the maximum number of records that can be accumulated by any single
Processor instance.
Operations like grouping, sorting or joining require certain amount of
records to be accumulated before they can proceed. You can set this option
to reduce the probability of OutOfMemoryError.
This option applies to each Processor instance separately, hence the
effective limit of records accumulated by each cluster member is influenced
by the vertex's localParallelism and the number of jobs in the cluster.
Currently, maxProcessorAccumulatedRecords limits:
- number of items sorted by the sort operation
- number of distinct keys accumulated by aggregation operations
- number of entries in the hash-join lookup tables
- number of entries in stateful transforms
- number of distinct items in distinct operation
Note: the limit does not apply to streaming aggregations.
-->
<max-processor-accumulated-records>9223372036854775807</max-processor-accumulated-records>
<edge-defaults>
<!-- capacity of the concurrent SPSC queue between each two processors -->
<queue-size>1024</queue-size>
<!-- network packet size limit in bytes, only applies to distributed edges -->
<packet-size-limit>16384</packet-size-limit>
<!-- receive window size multiplier, only applies to distributed edges -->
<receive-window-multiplier>3</receive-window-multiplier>
</edge-defaults>
</jet>
<integrity-checker enabled="false"/>
</hazelcast> </hazelcast>

View File

@@ -26,12 +26,12 @@
<artifactItem> <artifactItem>
<groupId>com.hazelcast</groupId> <groupId>com.hazelcast</groupId>
<artifactId>hazelcast</artifactId> <artifactId>hazelcast</artifactId>
<version>3.12.12</version> <version>5.3.1</version>
</artifactItem> </artifactItem>
<artifactItem> <artifactItem>
<groupId>com.hazelcast</groupId> <groupId>com.hazelcast</groupId>
<artifactId>hazelcast-aws</artifactId> <artifactId>hazelcast-aws</artifactId>
<version>2.4</version> <version>3.4</version>
</artifactItem> </artifactItem>
</artifactItems> </artifactItems>
<outputDirectory>lib</outputDirectory> <outputDirectory>lib</outputDirectory>

View File

@@ -126,7 +126,7 @@ public class Activator implements BundleActivator {
hazelcastInstance = Hazelcast.newHazelcastInstance(config); hazelcastInstance = Hazelcast.newHazelcastInstance(config);
MapConfig mc = config.getMapConfig("default"); MapConfig mc = config.getMapConfig("default");
if (mc != null) { if (mc != null) {
logger.info("Hazelcast Max Size Config: "+mc.getMaxSizeConfig().getMaxSizePolicy() + " " + mc.getMaxSizeConfig().getSize()); logger.info("Hazelcast Max Size Config: "+mc.getEvictionConfig().getMaxSizePolicy() + " " + mc.getEvictionConfig().getSize());
} }
return; return;
} catch (FileNotFoundException e) {} } catch (FileNotFoundException e) {}
@@ -142,7 +142,7 @@ public class Activator implements BundleActivator {
hazelcastInstance = Hazelcast.newHazelcastInstance(config); hazelcastInstance = Hazelcast.newHazelcastInstance(config);
MapConfig mc = config.getMapConfig("default"); MapConfig mc = config.getMapConfig("default");
if (mc != null) { if (mc != null) {
logger.info("Hazelcast Max Size Config: "+mc.getMaxSizeConfig().getMaxSizePolicy() + " " + mc.getMaxSizeConfig().getSize()); logger.info("Hazelcast Max Size Config: "+mc.getEvictionConfig().getMaxSizePolicy() + " " + mc.getEvictionConfig().getSize());
} }
return; return;
} catch (IOException e) {} } catch (IOException e) {}
@@ -154,7 +154,7 @@ public class Activator implements BundleActivator {
logger.warning("Starting hazelcast with default configuration"); logger.warning("Starting hazelcast with default configuration");
MapConfig mc = config.getMapConfig("default"); MapConfig mc = config.getMapConfig("default");
if (mc != null) { if (mc != null) {
logger.info("Hazelcast Max Size Config: "+mc.getMaxSizeConfig().getMaxSizePolicy() + " " + mc.getMaxSizeConfig().getSize()); logger.info("Hazelcast Max Size Config: "+mc.getEvictionConfig().getMaxSizePolicy() + " " + mc.getEvictionConfig().getSize());
} }
} }

View File

@@ -6,13 +6,13 @@ import java.util.Set;
import org.eclipse.osgi.framework.console.CommandInterpreter; import org.eclipse.osgi.framework.console.CommandInterpreter;
import org.eclipse.osgi.framework.console.CommandProvider; import org.eclipse.osgi.framework.console.CommandProvider;
import com.hazelcast.cluster.Cluster;
import com.hazelcast.cluster.Member;
import com.hazelcast.collection.ISet;
import com.hazelcast.collection.impl.set.SetService; import com.hazelcast.collection.impl.set.SetService;
import com.hazelcast.core.Cluster;
import com.hazelcast.core.DistributedObject; import com.hazelcast.core.DistributedObject;
import com.hazelcast.core.HazelcastInstance; import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.core.IMap; import com.hazelcast.map.IMap;
import com.hazelcast.core.ISet;
import com.hazelcast.core.Member;
import com.hazelcast.map.impl.MapService; import com.hazelcast.map.impl.MapService;
public class CacheConsoleProvider implements CommandProvider { public class CacheConsoleProvider implements CommandProvider {

View File

@@ -20,7 +20,7 @@ import java.util.concurrent.TimeUnit;
import org.idempiere.distributed.ICacheService; import org.idempiere.distributed.ICacheService;
import com.hazelcast.core.IMap; import com.hazelcast.map.IMap;
/** /**
* @author hengsin * @author hengsin

View File

@@ -17,7 +17,7 @@ import java.net.InetAddress;
import org.idempiere.distributed.IClusterMember; import org.idempiere.distributed.IClusterMember;
import com.hazelcast.core.Member; import com.hazelcast.cluster.Member;
/** /**
* @author hengsin * @author hengsin
@@ -39,9 +39,8 @@ public class ClusterMember implements IClusterMember {
this.port = port; this.port = port;
} }
@SuppressWarnings("deprecation")
public ClusterMember(Member member) { public ClusterMember(Member member) {
this.id = member.getUuid(); this.id = member.getUuid().toString();
this.address = member.getSocketAddress().getAddress(); this.address = member.getSocketAddress().getAddress();
this.port = member.getSocketAddress().getPort(); this.port = member.getSocketAddress().getPort();
} }

View File

@@ -24,11 +24,11 @@ import java.util.concurrent.Future;
import org.idempiere.distributed.IClusterMember; import org.idempiere.distributed.IClusterMember;
import org.idempiere.distributed.IClusterService; import org.idempiere.distributed.IClusterService;
import com.hazelcast.cluster.Member;
import com.hazelcast.core.HazelcastInstance; import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.core.IExecutorService; import com.hazelcast.core.IExecutorService;
import com.hazelcast.core.Member; import com.hazelcast.instance.impl.HazelcastInstanceImpl;
import com.hazelcast.instance.HazelcastInstanceImpl; import com.hazelcast.instance.impl.HazelcastInstanceProxy;
import com.hazelcast.instance.HazelcastInstanceProxy;
/** /**
* @author hengsin * @author hengsin
@@ -75,7 +75,7 @@ public class ClusterServiceImpl implements IClusterService {
if (instance != null) { if (instance != null) {
Set<Member> members = instance.getCluster().getMembers(); Set<Member> members = instance.getCluster().getMembers();
for(Member member : members) { for(Member member : members) {
if (member.getUuid().equals(clusterMember.getId())) { if (member.getUuid().toString().equals(clusterMember.getId())) {
IExecutorService service = Activator.getHazelcastInstance().getExecutorService("default"); IExecutorService service = Activator.getHazelcastInstance().getExecutorService("default");
return service.submitToMember(task, member); return service.submitToMember(task, member);
} }
@@ -99,7 +99,7 @@ public class ClusterServiceImpl implements IClusterService {
Set<Member> members = instance.getCluster().getMembers(); Set<Member> members = instance.getCluster().getMembers();
Set<Member> selectedMembers = new HashSet<Member>(); Set<Member> selectedMembers = new HashSet<Member>();
for(Member member : members) { for(Member member : members) {
if (selectedIds.contains(member.getUuid())) { if (selectedIds.contains(member.getUuid().toString())) {
selectedMembers.add(member); selectedMembers.add(member);
} }
} }

View File

@@ -34,7 +34,7 @@ public class MessageServiceImpl implements IMessageService {
public <T> ITopic<T> getTopic(String name) { public <T> ITopic<T> getTopic(String name) {
HazelcastInstance instance = Activator.getHazelcastInstance(); HazelcastInstance instance = Activator.getHazelcastInstance();
if (instance != null) { if (instance != null) {
com.hazelcast.core.ITopic<T> topic = instance.getTopic(name); com.hazelcast.topic.ITopic<T> topic = instance.getTopic(name);
return new TopicImpl<T>(topic); return new TopicImpl<T>(topic);
} else { } else {
return null; return null;

View File

@@ -17,12 +17,13 @@ import java.util.ArrayList;
import java.util.HashMap; import java.util.HashMap;
import java.util.List; import java.util.List;
import java.util.Map; import java.util.Map;
import java.util.UUID;
import org.idempiere.distributed.ITopic; import org.idempiere.distributed.ITopic;
import org.idempiere.distributed.ITopicSubscriber; import org.idempiere.distributed.ITopicSubscriber;
import com.hazelcast.core.Message; import com.hazelcast.topic.Message;
import com.hazelcast.core.MessageListener; import com.hazelcast.topic.MessageListener;
/** /**
* @author hengsin * @author hengsin
@@ -30,7 +31,7 @@ import com.hazelcast.core.MessageListener;
*/ */
public class TopicImpl<E> implements ITopic<E> { public class TopicImpl<E> implements ITopic<E> {
private com.hazelcast.core.ITopic<E> topic; private com.hazelcast.topic.ITopic<E> topic;
private List<TopicSubscriberAdapter<E>> adapters; private List<TopicSubscriberAdapter<E>> adapters;
private Map<TopicSubscriberAdapter<E>, String> registrationMap; private Map<TopicSubscriberAdapter<E>, String> registrationMap;
@@ -38,7 +39,7 @@ public class TopicImpl<E> implements ITopic<E> {
/** /**
* *
*/ */
public TopicImpl(com.hazelcast.core.ITopic<E> topic) { public TopicImpl(com.hazelcast.topic.ITopic<E> topic) {
this.topic = topic; this.topic = topic;
adapters = new ArrayList<TopicSubscriberAdapter<E>>(); adapters = new ArrayList<TopicSubscriberAdapter<E>>();
registrationMap = new HashMap<>(); registrationMap = new HashMap<>();
@@ -52,7 +53,7 @@ public class TopicImpl<E> implements ITopic<E> {
@Override @Override
public void subscribe(final ITopicSubscriber<E> subscriber) { public void subscribe(final ITopicSubscriber<E> subscriber) {
TopicSubscriberAdapter<E> adapter = new TopicSubscriberAdapter<E>(subscriber); TopicSubscriberAdapter<E> adapter = new TopicSubscriberAdapter<E>(subscriber);
String registrationId = topic.addMessageListener(adapter); String registrationId = topic.addMessageListener(adapter).toString();
adapters.add(adapter); adapters.add(adapter);
registrationMap.put(adapter, registrationId); registrationMap.put(adapter, registrationId);
} }
@@ -64,7 +65,7 @@ public class TopicImpl<E> implements ITopic<E> {
if (adapter.subscriber == subscriber) { if (adapter.subscriber == subscriber) {
found = adapter; found = adapter;
String registrationId = registrationMap.get(adapter); String registrationId = registrationMap.get(adapter);
if (topic.removeMessageListener(registrationId)) if (topic.removeMessageListener(UUID.fromString(registrationId)))
registrationMap.remove(adapter); registrationMap.remove(adapter);
break; break;
} }