The IoT Data Controller Audit module contains a pre-configured discovery service. However, in some specific cases the configuration needs to be modified, namely when:

  • Running BellaDati IoT Data Controller on Microsoft Azure or AWS
  • Manually discovering modules in environments where multicast is not possible (a sketch of this change is shown below)
  • Forcing discovery of specific modules only (see the note after the configuration example)

This configuration is stored in the cluster.xml configuration file.

Please see the Hazelcast documentation for more details. Ask your network administrator if you are unsure about the configuration.
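When running on AWS or Microsoft Azure, or in any environment where multicast is not possible, the key change is in the <join> section of cluster.xml: multicast is disabled and the cluster members are listed explicitly over TCP/IP. The following is a minimal sketch only; the IP addresses are placeholders and must be replaced with the real addresses of your Audit and receiver modules:

        <join>
            <!-- Multicast discovery is not supported on AWS or MS Azure -->
            <multicast enabled="false" />
            <!-- List every module (Audit and receivers) explicitly by IP address -->
            <tcp-ip enabled="true">
                <member>10.0.2.112</member> <!-- placeholder: Audit module -->
                <member>10.0.2.41</member>  <!-- placeholder: receiver module -->
            </tcp-ip>
        </join>

The full configuration example below shows this section in context.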

Configuration example

<?xml version="1.0" encoding="UTF-8"?>
<hazelcast xsi:schemaLocation="http://www.hazelcast.com/schema/config hazelcast-config-3.6.xsd" xmlns="http://www.hazelcast.com/schema/config" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
    <properties>
    	<property name="hazelcast.discovery.enabled">true</property>
        <property name="hazelcast.memcache.enabled">true</property>
        <property name="hazelcast.rest.enabled">false</property>
        <property name="hazelcast.wait.seconds.before.join">0</property>
        <property name="hazelcast.logging.type">slf4j</property>
        <property name="hazelcast.health.monitoring.delay.seconds">2</property>
        <property name="hazelcast.max.no.heartbeat.seconds">30</property>
        <property name="hazelcast.max.no.master.confirmation.seconds">10</property>
        <property name="hazelcast.master.confirmation.interval.seconds">10</property>
        <property name="hazelcast.member.list.publish.interval.seconds">10</property>
        <property name="hazelcast.connection.monitor.interval">10</property>
        <property name="hazelcast.connection.monitor.max.faults">2</property>
        <property name="hazelcast.partition.migration.timeout">10</property>
        <property name="hazelcast.migration.min.delay.on.member.removed.seconds">3</property>
    </properties>
    
    <network>
        <port auto-increment="true" port-count="10000">5701</port>
        
        <join>
            <!-- Discovering cluster members by Multicast. Not supported on: AWS, MS Azure -->
            <multicast enabled="false" />
            
            <!-- Discovering cluster members by TCP/IP -->
            <tcp-ip enabled="true">
                <interface>127.0.0.1</interface>
                <!-- <member>10.0.2.112</member> audit -->
                <!-- <member>10.0.2.41</member> receiver1 -->
                <!-- <member>10.0.2.238</member> receiver2 -->
                <!-- <member>10.0.2.162</member> receiver3 -->
                <!-- <member>127.0.0.1</member> local bind is enabled by default -->
            </tcp-ip>
        </join>
        <interfaces enabled="true">
            <interface>127.0.0.1</interface>
        </interfaces>

        <ssl enabled="false" />
        <socket-interceptor enabled="false" />

        <symmetric-encryption enabled="true">
            <!-- encryption algorithm such as DES/ECB/PKCS5Padding, PBEWithMD5AndDES, AES/CBC/PKCS5Padding, Blowfish, DESede -->
            <algorithm>PBEWithMD5AndDES</algorithm>
            <!-- salt value to use when generating the secret key -->
            <salt>thesalt</salt>
            <!-- pass phrase to use when generating the secret key -->
            <password>thepass</password>
            <!-- iteration count to use when generating the secret key -->
            <iteration-count>19</iteration-count>
        </symmetric-encryption>
    </network>
    <partition-group enabled="false" />
    <executor-service name="default">
        <pool-size>16</pool-size>
        <!-- Queue capacity. 0 means Integer.MAX_VALUE. -->
        <queue-capacity>0</queue-capacity>
    </executor-service>
    <map name="__vertx.subs">
        <!-- Number of backups. If 1 is set as the backup-count for example, then all entries of the map will be copied to another JVM for fail-safety. 0 means no backup. -->
        <backup-count>1</backup-count>
        <!-- Maximum number of seconds for each entry to stay in the map. Entries that are older than <time-to-live-seconds> and not updated for <time-to-live-seconds> will get automatically evicted from 
            the map. Any integer between 0 and Integer.MAX_VALUE. 0 means infinite. Default is 0. -->
        <time-to-live-seconds>15</time-to-live-seconds>
        <!-- Maximum number of seconds for each entry to stay idle in the map. Entries that are idle (not touched) for more than <max-idle-seconds> will get automatically evicted from the map. An entry is
            touched if get, put or containsKey is called on it. Any integer between 0 and Integer.MAX_VALUE. 0 means infinite. Default is 0. -->
        <max-idle-seconds>10</max-idle-seconds>
        <!-- Valid values are: NONE (no eviction), LRU (Least Recently Used), LFU (Least Frequently Used). NONE is the default. -->
        <eviction-policy>LRU</eviction-policy>
        <!-- Maximum size of the map. When the max size is reached, the map is evicted based on the policy defined. Any integer between 0 and Integer.MAX_VALUE. 0 means Integer.MAX_VALUE. Default is 0. -->
        <max-size policy="USED_HEAP_PERCENTAGE">50</max-size>
        <!-- When the max size is reached, the specified percentage of the map will be evicted. Any integer between 0 and 100. If 25 is set for example, 25% of the entries will get evicted. -->
        <eviction-percentage>25</eviction-percentage>
        <!-- While recovering from split-brain (network partitioning), map entries in the smaller cluster will merge into the bigger cluster based on the policy set here. When an entry merges into the cluster,
            there might already be an existing entry with the same key, and the values of these entries might differ. Which value should be set for the key? The conflict is resolved by the policy set here.
            The default policy is PutIfAbsentMapMergePolicy. Built-in merge policies: com.hazelcast.map.merge.PassThroughMergePolicy - the entry will be added if there is no existing entry for the key;
            com.hazelcast.map.merge.PutIfAbsentMapMergePolicy - the entry will be added if the merging entry doesn't exist in the cluster; com.hazelcast.map.merge.HigherHitsMapMergePolicy - the entry with
            the higher hits wins; com.hazelcast.map.merge.LatestUpdateMapMergePolicy - the entry with the latest update wins. -->
        <merge-policy>com.hazelcast.map.merge.LatestUpdateMapMergePolicy</merge-policy>
    </map>
    <!-- Used internally in Vert.x to implement async locks -->
    <semaphore name="__vertx.*">
        <initial-permits>1</initial-permits>
    </semaphore>
</hazelcast>
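
To force discovery of specific modules only, uncomment the corresponding <member> entries in the <tcp-ip> section of the example above and bind the node to its real network interface instead of the local loopback. A minimal sketch of the interface binding, assuming the modules run in the 10.0.2.x subnet (a placeholder taken from the commented member entries):

        <interfaces enabled="true">
            <interface>10.0.2.*</interface> <!-- placeholder: subnet of the IoT Data Controller modules -->
        </interfaces>

Note that the salt, password and iteration count in the <symmetric-encryption> section above are example values; replace them with your own and use the same values on every module in the cluster.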