Thursday, April 16, 2015

Multi-master PostgreSQL server setup with BDR on CentOS 6

node1
    10.0.3.21

node2
    10.0.3.22


First, remove any existing installation if one is present.

yum remove postgresql94\*
rm -rf /var/lib/pgsql/

Add the BDR repository:

yum install http://packages.2ndquadrant.com/postgresql-bdr94-2ndquadrant/yum-repo-rpms/postgresql-bdr94-2ndquadrant-redhat-1.0-2.noarch.rpm
Installation

yum install postgresql-bdr94-bdr
su - postgres
PGDATA=/var/lib/pgsql/9.4-bdr/data PGENGINE=/usr/pgsql-9.4/bin  /usr/pgsql-9.4/bin/postgresql94-setup initdb
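
Both files edited below live in the data directory created by initdb (assuming the PGDATA path used above):

ls /var/lib/pgsql/9.4-bdr/data/pg_hba.conf
ls /var/lib/pgsql/9.4-bdr/data/postgresql.conf
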
Add the following to pg_hba.conf:

host    all             all             127.0.0.1/32            trust
host    all             all             10.0.3.0/24             trust
local   replication     postgres                                trust
host    replication     postgres        127.0.0.1/32            trust
host    replication     postgres        10.0.3.0/24             trust
host    replication     postgres        ::1/128                 trust
Add the following to postgresql.conf:

listen_addresses = '*'
shared_preload_libraries = 'bdr'
wal_level = 'logical'
track_commit_timestamp = on
max_connections = 100
max_wal_senders = 10
max_replication_slots = 10
max_worker_processes = 10
Restart PostgreSQL so the changes take effect:

/etc/init.d/postgresql-9.4 restart
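
A quick sanity check that the settings were picked up after the restart (run as the postgres user, plain psql, nothing BDR-specific):

psql -U postgres -c "SHOW shared_preload_libraries;"
psql -U postgres -c "SHOW wal_level;"
psql -U postgres -c "SHOW track_commit_timestamp;"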

Create the database and configure BDR.

Node 1

createdb -U postgres testdb
psql -U postgres testdb

testdb=# CREATE EXTENSION btree_gist;
testdb=# CREATE EXTENSION bdr;

testdb=# SELECT bdr.bdr_group_create(
      local_node_name := 'node1',
      node_external_dsn := 'host=10.0.3.21 port=5432 dbname=testdb'
);

testdb=# SELECT bdr.bdr_node_join_wait_for_ready();
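
To confirm the node registered itself, the BDR node catalog can be queried; node_status should show 'r' (ready) once the group is up (column names as in the BDR 0.9 catalog, adjust if your version differs):

testdb=# SELECT node_name, node_status FROM bdr.bdr_nodes;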



Node 2

createdb -U postgres testdb
psql -U postgres testdb

testdb=# CREATE EXTENSION btree_gist;
testdb=# CREATE EXTENSION bdr;

testdb=# SELECT bdr.bdr_group_join(
      local_node_name := 'node2',
      node_external_dsn := 'host=10.0.3.22 port=5432 dbname=testdb',
      join_using_dsn := 'host=10.0.3.21 port=5432 dbname=testdb'
);

testdb=# SELECT bdr.bdr_node_join_wait_for_ready();
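
After the join completes, each node should also hold a logical replication slot for its peer; this can be checked from either side with the standard PostgreSQL 9.4 catalog:

testdb=# SELECT slot_name, plugin, active FROM pg_replication_slots;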


TEST

Node 1
CREATE TABLE t1bdr (c1 INT, PRIMARY KEY (c1));
INSERT INTO t1bdr VALUES (1);
INSERT INTO t1bdr VALUES (2);

SELECT * FROM t1bdr;


Node 2

SELECT * FROM t1bdr;
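
Since BDR is multi-master, writes made on node 2 replicate back to node 1 as well; a quick check against the same table:

Node 2
INSERT INTO t1bdr VALUES (3);

Node 1
SELECT * FROM t1bdr;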




Friday, April 10, 2015

Redis 3 Cluster Setup on CentOS 6.6


Node 1 (10.0.3.91)
master  6379
slave   6380

Node 2 (10.0.3.92)
master  6379
slave   6380

Node 3 (10.0.3.93)
master  6379
slave   6380


Required packages

yum groupinstall 'Development Tools'
yum install ruby rubygems wget
gem install redis


Installation (common to all 3 nodes)

cd /usr/local/src
wget http://download.redis.io/releases/redis-3.0.0.tar.gz
tar zxvf redis-3.0.0.tar.gz
cd redis-3.0.0
make
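
After make, the binaries stay under the source tree in src/ (nothing is installed system-wide); the commands in the rest of this walkthrough assume that layout:

ls /usr/local/src/redis-3.0.0/src/redis-server
ls /usr/local/src/redis-3.0.0/src/redis-cli
ls /usr/local/src/redis-3.0.0/src/redis-trib.rb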


Node 1

cd /usr/local/src
mkdir mycluster
cd mycluster
mkdir 6379 6380
cat > 6379/redis.conf <<EOF
port 6379
cluster-enabled yes
cluster-config-file nodes6379.conf
cluster-node-timeout 5000
appendonly yes
EOF
cat > 6380/redis.conf <<EOF
port 6380
cluster-enabled yes
cluster-config-file nodes6380.conf
cluster-node-timeout 5000
appendonly yes
EOF

cd 6379
nohup ../../redis-3.0.0/src/redis-server ./redis.conf &
cd ../6380
nohup ../../redis-3.0.0/src/redis-server ./redis.conf &
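
A quick check that both instances are up and running in cluster mode (the Cluster section of INFO should report cluster_enabled:1):

/usr/local/src/redis-3.0.0/src/redis-cli -p 6379 ping
/usr/local/src/redis-3.0.0/src/redis-cli -p 6379 info cluster
/usr/local/src/redis-3.0.0/src/redis-cli -p 6380 info cluster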


Node 2

cd /usr/local/src
mkdir mycluster
cd mycluster
mkdir 6379 6380 
cat > 6379/redis.conf <<EOF
port 6379
cluster-enabled yes
cluster-config-file nodes6379.conf
cluster-node-timeout 5000
appendonly yes
EOF
 
cat > 6380/redis.conf <<EOF
port 6380
cluster-enabled yes
cluster-config-file nodes6380.conf
cluster-node-timeout 5000
appendonly yes
EOF

cd 6379
nohup ../../redis-3.0.0/src/redis-server ./redis.conf &
cd ../6380
nohup ../../redis-3.0.0/src/redis-server ./redis.conf &


Node 3

cd /usr/local/src
mkdir mycluster
cd mycluster
mkdir 6379 6380 
cat > 6379/redis.conf <<EOF
port 6379
cluster-enabled yes
cluster-config-file nodes6379.conf
cluster-node-timeout 5000
appendonly yes
EOF
 
cat > 6380/redis.conf <<EOF
port 6380
cluster-enabled yes
cluster-config-file nodes6380.conf
cluster-node-timeout 5000
appendonly yes
EOF

cd 6379
nohup ../../redis-3.0.0/src/redis-server ./redis.conf &
cd ../6380
nohup ../../redis-3.0.0/src/redis-server ./redis.conf &


Cluster setup (run on node 1)

cd /usr/local/src/redis-3.0.0/src
./redis-trib.rb create --replicas 1 10.0.3.91:6379 10.0.3.91:6380 10.0.3.92:6379 10.0.3.92:6380 10.0.3.93:6379 10.0.3.93:6380


>>> Creating cluster
Connecting to node 10.0.3.91:6379: OK
Connecting to node 10.0.3.91:6380: OK
Connecting to node 10.0.3.92:6379: OK
Connecting to node 10.0.3.92:6380: OK
Connecting to node 10.0.3.93:6379: OK
Connecting to node 10.0.3.93:6380: OK
>>> Performing hash slots allocation on 6 nodes...
Using 3 masters:
10.0.3.93:6379
10.0.3.92:6379
10.0.3.91:6379
Adding replica 10.0.3.92:6380 to 10.0.3.93:6379
Adding replica 10.0.3.93:6380 to 10.0.3.92:6379
Adding replica 10.0.3.91:6380 to 10.0.3.91:6379
M: 5f01251d0016fb4f7b48576ff6cb25584cbb7460 10.0.3.91:6379
   slots:10923-16383 (5461 slots) master
S: 9dbe050bb9fdd90705ccf7ca0197010c227eeca3 10.0.3.91:6380
   replicates 5f01251d0016fb4f7b48576ff6cb25584cbb7460
M: 7ac5c6c23abb8fd8ec74f94aaf75b326a9d57419 10.0.3.92:6379
   slots:5461-10922 (5462 slots) master
S: e6530d1b98897cab0a051c28b50bd6a91704d93e 10.0.3.92:6380
   replicates 3b33d7bb2544a0e4f8954e22f3f1ec8bb5e93ede
M: 3b33d7bb2544a0e4f8954e22f3f1ec8bb5e93ede 10.0.3.93:6379
   slots:0-5460 (5461 slots) master
S: 60620c4ae3b463f936acc55b577cf80b91104adf 10.0.3.93:6380
   replicates 7ac5c6c23abb8fd8ec74f94aaf75b326a9d57419
Can I set the above configuration? (type 'yes' to accept): yes                                                                                                            
>>> Nodes configuration updated                                                                                                                                           
>>> Assign a different config epoch to each node                                                                                                                          
>>> Sending CLUSTER MEET messages to join the cluster                                                                                                                     
Waiting for the cluster to join...                                                                                                                                        
>>> Performing Cluster Check (using node 10.0.3.91:6379)                                                                                                            
M: 5f01251d0016fb4f7b48576ff6cb25584cbb7460 10.0.3.91:6379                                                                                                          
   slots:10923-16383 (5461 slots) master                                                                                                                                  
M: 9dbe050bb9fdd90705ccf7ca0197010c227eeca3 10.0.3.91:6380                                                                                                          
   slots: (0 slots) master                                                                                                                                                
   replicates 5f01251d0016fb4f7b48576ff6cb25584cbb7460                                                                                                                    
M: 7ac5c6c23abb8fd8ec74f94aaf75b326a9d57419 10.0.3.92:6379                                                                                                          
   slots:5461-10922 (5462 slots) master                                                                                                                                   
M: e6530d1b98897cab0a051c28b50bd6a91704d93e 10.0.3.92:6380                                                                                                          
   slots: (0 slots) master                                                                                                                                                
   replicates 3b33d7bb2544a0e4f8954e22f3f1ec8bb5e93ede                                                                                                                    
M: 3b33d7bb2544a0e4f8954e22f3f1ec8bb5e93ede 10.0.3.93:6379                                                                                                          
   slots:0-5460 (5461 slots) master                                                                                                                                       
M: 60620c4ae3b463f936acc55b577cf80b91104adf 10.0.3.93:6380                                                                                                          
   slots: (0 slots) master                                                                                                                                                
   replicates 7ac5c6c23abb8fd8ec74f94aaf75b326a9d57419                                                                                                                    
[OK] All nodes agree about slots configuration.                                                                                                                           
>>> Check for open slots...                                                                                                                                               
>>> Check slots coverage...                                                                                                                                               
[OK] All 16384 slots covered.
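
The final state can be checked from any of the nodes; -c puts redis-cli into cluster mode so it follows redirects:

/usr/local/src/redis-3.0.0/src/redis-cli -c -p 6379 cluster info
/usr/local/src/redis-3.0.0/src/redis-cli -c -p 6379 cluster nodes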



Test
--------
git clone https://github.com/antirez/redis-rb-cluster.git
cd redis-rb-cluster
vi example.rb

  require './cluster'

  # Seed nodes; the client discovers the rest of the cluster from any of them.
  startup_nodes = [
      {:host => "10.0.3.91", :port => 6379},
      {:host => "10.0.3.92", :port => 6379},
      {:host => "10.0.3.93", :port => 6379}
  ]
  rc = RedisCluster.new(startup_nodes,32,:timeout => 0.1)

  last = false

  # Resume from the last written counter if the script was restarted.
  while not last
      begin
          last = rc.get("__last__")
          last = 0 if !last
      rescue => e
          puts "error #{e.to_s}"
          sleep 1
      end
  end

  # Write sequential keys forever; errors are printed but do not stop the
  # loop, so the script keeps running through failovers.
  ((last.to_i+1)..1000000000).each{|x|
      begin
          rc.set("foo#{x}",x)
          puts rc.get("foo#{x}")
          rc.set("__last__",x)
      rescue => e
          puts "error #{e.to_s}"
      end
      sleep 0.1
  }

ruby ./example.rb
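
While example.rb is writing, the keys can be read back through any other node to confirm they are being distributed across the cluster; __last__ is the counter key the script maintains, and the foo keys exist once the counter has passed them:

/usr/local/src/redis-3.0.0/src/redis-cli -c -h 10.0.3.92 -p 6379 get __last__
/usr/local/src/redis-3.0.0/src/redis-cli -c -h 10.0.3.93 -p 6379 get foo100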