
Java client to Artemis cluster

I have created 3 Artemis master brokers on 3 machines. Each master has a slave node running on the same machine. JGroups is used for this cluster. Now I want only one active server and the remaining 2 as passive servers. When I connect to the cluster, it seems the load balancer sends my request to one of the servers (maybe round-robin based?).

My configuration is

<configuration xmlns= .. >

<core xmlns="urn:activemq:core">
    <bindings-directory>./data/bindings</bindings-directory>
    <journal-directory>./data/journal</journal-directory>
    <large-messages-directory>./data/largemessages</large-messages-directory>
    <paging-directory>./data/paging</paging-directory>
    <connectors>
        <connector name="netty-connector">tcp://10.168.100.41:61616</connector>
    </connectors>

    <acceptors>
        <acceptor name="netty-acceptor">tcp://10.168.100.41:61616</acceptor>
    </acceptors>

    <broadcast-groups>
        <broadcast-group name="Artemis-broadcast-group">
            <broadcast-period>5000</broadcast-period>
            <jgroups-file>test-jgroups-file_ping.xml</jgroups-file>
            <jgroups-channel>active_broadcast_channel</jgroups-channel>
            <connector-ref>netty-connector</connector-ref>
        </broadcast-group>
    </broadcast-groups>

    <discovery-groups>
        <discovery-group name="Artemis-discovery-group">
            <jgroups-file>test-jgroups-file_ping.xml</jgroups-file>
            <jgroups-channel>active_broadcast_channel</jgroups-channel>
            <refresh-timeout>10000</refresh-timeout>
        </discovery-group>
    </discovery-groups>

    <cluster-connections>
        <cluster-connection name="Demo-Artemis-Cluster">
            <address></address>
            <connector-ref>netty-connector</connector-ref>
            <check-period>1000</check-period>
            <connection-ttl>5000</connection-ttl>
            <min-large-message-size>50000</min-large-message-size>
            <call-timeout>5000</call-timeout>
            <retry-interval>500</retry-interval>
            <retry-interval-multiplier>1.0</retry-interval-multiplier>
            <max-retry-interval>5000</max-retry-interval>
            <initial-connect-attempts>-1</initial-connect-attempts>
            <reconnect-attempts>-1</reconnect-attempts>
            <use-duplicate-detection>true</use-duplicate-detection>
            <message-load-balancing>STRICT</message-load-balancing>
            <max-hops>1</max-hops>
            <confirmation-window-size>32000</confirmation-window-size>
            <call-failover-timeout>30000</call-failover-timeout>
            <notification-interval>1000</notification-interval>
            <notification-attempts>2</notification-attempts>
            <discovery-group-ref discovery-group-name="Artemis-discovery-group" />
        </cluster-connection>
    </cluster-connections>

    <cluster-user>admin</cluster-user>
    <cluster-password>admin</cluster-password>

    <security-settings>
        <security-setting match="#">
            <permission type="createNonDurableQueue" roles="amq" />
            <permission type="deleteNonDurableQueue" roles="amq" />
            <permission type="createDurableQueue" roles="amq" />
            <permission type="deleteDurableQueue" roles="amq" />
            <permission type="createAddress" roles="amq" />
            <permission type="deleteAddress" roles="amq" />
            <permission type="consume" roles="amq" />
            <permission type="browse" roles="amq" />
            <permission type="send" roles="amq" />
            <permission type="manage" roles="amq" />
        </security-setting>
    </security-settings>

    <address-settings>
        <address-setting match="activemq.management#">
            <dead-letter-address>DLQ</dead-letter-address>
            <expiry-address>ExpiryQueue</expiry-address>
            <redelivery-delay>0</redelivery-delay>
            <max-size-bytes>-1</max-size-bytes>
            <message-counter-history-day-limit>10</message-counter-history-day-limit>
            <address-full-policy>PAGE</address-full-policy>
            <auto-create-queues>true</auto-create-queues>
            <auto-create-addresses>true</auto-create-addresses>
            <auto-create-jms-queues>true</auto-create-jms-queues>
            <auto-create-jms-topics>true</auto-create-jms-topics>
        </address-setting>

        <address-setting match="#">
            <dead-letter-address>DLQ</dead-letter-address>
            <expiry-address>ExpiryQueue</expiry-address>
            <redelivery-delay>0</redelivery-delay>
            <max-size-bytes>-1</max-size-bytes>
            <message-counter-history-day-limit>10</message-counter-history-day-limit>
            <address-full-policy>PAGE</address-full-policy>
            <auto-create-queues>true</auto-create-queues>
            <auto-create-addresses>true</auto-create-addresses>
            <auto-create-jms-queues>true</auto-create-jms-queues>
            <auto-create-jms-topics>true</auto-create-jms-topics>
        </address-setting>
    </address-settings>

    <addresses>
        <address name="DLQ">
            <anycast>
                <queue name="DLQ" />
            </anycast>
        </address>
        <address name="ExpiryQueue">
            <anycast>
                <queue name="ExpiryQueue" />
            </anycast>
        </address>

    </addresses>

    <ha-policy>
        <replication>
            <master>
                <check-for-live-server>true</check-for-live-server>
            </master>
        </replication>
    </ha-policy>


</core>
</configuration>

The other 2 servers have a similar config, except for the IP in the connector and the acceptor.
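For reference, the slave side of the replication ha-policy on the backup brokers would look roughly like this (the slave broker.xml is not included here, so this is only a sketch):

    <ha-policy>
        <replication>
            <slave>
                <!-- let the original master take over again once it comes back -->
                <allow-failback>true</allow-failback>
            </slave>
        </replication>
    </ha-policy>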

The JGroups file (test-jgroups-file_ping.xml) is:

<config xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns="urn:org:jgroups"
xsi:schemaLocation="urn:org:jgroups http://www.jgroups.org/schema/jgroups.xsd">
<TCP recv_buf_size="${tcp.recv_buf_size:5M}"
    send_buf_size="${tcp.send_buf_size:5M}"
    max_bundle_size="64K"
    max_bundle_timeout="30"
    sock_conn_timeout="300"

    timer_type="new3"
    timer.min_threads="4"
    timer.max_threads="10"
    timer.keep_alive_time="3000"
    timer.queue_max_size="500"

    thread_pool.enabled="true"
    thread_pool.min_threads="2"
    thread_pool.max_threads="8"
    thread_pool.keep_alive_time="5000"
    thread_pool.queue_enabled="true"
    thread_pool.queue_max_size="10000"
    thread_pool.rejection_policy="discard"

    oob_thread_pool.enabled="true"
    oob_thread_pool.min_threads="1"
    oob_thread_pool.max_threads="8"
    oob_thread_pool.keep_alive_time="5000"
    oob_thread_pool.queue_enabled="false"
    oob_thread_pool.queue_max_size="100"
    oob_thread_pool.rejection_policy="discard"/>

    <TRACE/>

<JDBC_PING connection_url="connection_url" connection_username="username" connection_password="password" connection_driver="org.postgresql.Driver" />

<MERGE3  min_interval="10000"
        max_interval="30000"/>
<FD_SOCK/>
<FD timeout="3000" max_tries="3" />
<VERIFY_SUSPECT timeout="1500"  />
<BARRIER />
<pbcast.NAKACK2 use_mcast_xmit="false"
                discard_delivered_msgs="true"/>
<UNICAST3 />
<pbcast.STABLE stability_delay="1000" desired_avg_gossip="50000"
            max_bytes="4M"/>
<pbcast.GMS print_local_addr="true" join_timeout="2000"
            view_bundling="true"/>
<MFC max_credits="2M"
    min_threshold="0.4"/>
<FRAG2 frag_size="60K"  />
<pbcast.STATE_TRANSFER/>
<pbcast.FLUSH timeout="0"/>
</config>

On the client side, I did this for both the sender and the receiver:

 jndiProp.put("java.naming.factory.initial", "org.apache.activemq.artemis.jndi.ActiveMQInitialContextFactory");

 jndiProp.put("connectionFactory.myConnectionFactory", "(tcp://10.168.100.41:61616,tcp://10.168.100.42:61616,tcp://10.168.100.43:61616)?ha=true");

I want one server to process all requests and the other 2 on standby. Or, is it possible for the sender to use one connection/session to send messages and for the receiver to also use one connection to read them? In the current scenario, I have to check all 3 servers for messages on the client side. I read that this has something to do with message-load-balancing, so I changed the value from ON_DEMAND to STRICT, but it seems that's not enough. Any help regarding this? TIA

If you want to be able to connect to any node in the cluster and consume messages sent to any other node in the cluster, then you need to set your redistribution-delay to something >= 0. The default redistribution-delay is -1, which means messages will never be redistributed. The documentation is pretty clear about this. Here's an example configuration:

<address-settings>
   <address-setting match="#">
      <redistribution-delay>0</redistribution-delay>
   </address-setting>
</address-settings>

Also, your message-load-balancing must be ON_DEMAND.
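Applied to the cluster-connection from the question, that means changing the existing STRICT value back, along these lines (the other settings are left as they are and omitted here):

    <cluster-connection name="Demo-Artemis-Cluster">
        <!-- ...existing settings unchanged... -->
        <message-load-balancing>ON_DEMAND</message-load-balancing>
    </cluster-connection>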
