[Debian-ha-maintainers] Bug#1054326: Can't start global heartbeat when drbd device is on top of LVM device

Daniel Smolik marvin at mydatex.cz
Sun Oct 22 11:11:19 BST 2023


Yes, no problem. Here it is.

On 22. 10. 2023 at 11:57, Valentin Vidic wrote:
> On Sat, Oct 21, 2023 at 11:19:50PM +0200, Dan Smolik wrote:
>> In a virtual environment I am trying to build a 2-node OCFS2 cluster. When
>> the DRBD device is on top of an md device, everything works. But when the
>> DRBD device is on top of an LVM device, the global heartbeat doesn't start.
>>
>> Using config file '/etc/ocfs2/cluster.conf'
> Maybe you can share more info, like what the cluster.conf looks like
> in this setup?
>
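
For context, the DRBD backing device in the attached config is /dev/c/o, i.e. an LVM logical volume "o" in a volume group "c". A minimal sketch of how such a device could be created (the underlying disk /dev/vdb and the LV size are only placeholders, not taken from the report):

    pvcreate /dev/vdb            # placeholder virtual disk
    vgcreate c /dev/vdb          # VG "c", matching the /dev/c/o path below
    lvcreate -n o -L 10G c       # LV "o"; size chosen arbitrarily for the sketch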
-------------- next part --------------
resource drbdocfs01 {

        options {
                on-no-data-accessible suspend-io;
        }

        #disk {
        #        on-io-error     detach;
        #        no-disk-flushes;
        #        no-disk-barrier;
        #        c-plan-ahead    0;
        #        c-fill-target   24M;
        #        c-min-rate      80M;
        #        c-max-rate      720M;
        #}

        net {
                cram-hmac-alg "sha1";
                # shared-secret "secret_string";
                allow-two-primaries;
                after-sb-0pri discard-zero-changes;
                after-sb-1pri discard-secondary;
                after-sb-2pri disconnect;

                #max-buffers  36k;
                #sndbuf-size  1024k;
                #rcvbuf-size  2048k;
        }

        on drbd-server01 {
                address 192.168.64.1:7789;

                volume 0 {
                        device minor 0;
                        meta-disk internal;
                        disk /dev/c/o;
                }
        }

        on drbd-server02 {
                address 192.168.64.2:7789;

                volume 0 {
                        device minor 0;
                        meta-disk internal;
                        disk /dev/c/o;
                }
        }
}
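
For completeness, the resource above would normally be initialised on both nodes roughly as follows (a sketch, not commands taken from the report):

    drbdadm create-md drbdocfs01   # write internal metadata on the LVM backing device
    drbdadm up drbdocfs01          # attach the disk and connect to the peer
    # on one node only, to allow the initial sync:
    drbdadm primary --force drbdocfs01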
-------------- next part --------------
cluster:
	name = mdtxcluster
	heartbeat_mode = global
	node_count = 2

heartbeat:
	cluster = mdtxcluster
	region = 3D69F9BD7AF24C59BB600A8D7B1D4770


node:
	cluster = mdtxcluster
	number = 0
	ip_port = 7777
	ip_address = 192.168.64.1
	name = drbd-server01

node:
	cluster = mdtxcluster
	number = 1
	ip_port = 7777
	ip_address = 192.168.64.2
	name = drbd-server02
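
With this cluster.conf, the global heartbeat region would normally be created and started roughly like this (a sketch assuming the DRBD device is /dev/drbd0, which follows from "device minor 0" above; the region UUID in the heartbeat section is the one mkfs.ocfs2 assigns to that device):

    # format the shared DRBD device as an OCFS2 global heartbeat device (one node only)
    mkfs.ocfs2 --cluster-stack=o2cb --cluster-name=mdtxcluster --global-heartbeat /dev/drbd0
    # register the heartbeat region, then the cluster, and start the heartbeat
    o2cb add-heartbeat mdtxcluster /dev/drbd0
    o2cb register-cluster mdtxcluster
    o2cb start-heartbeat mdtxcluster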

-------------- next part --------------
resource drbdocfs01 {

        options {
                on-no-data-accessible suspend-io;
        }

        #disk {
        #        on-io-error     detach;
        #        no-disk-flushes;
        #        no-disk-barrier;
        #        c-plan-ahead    0;
        #        c-fill-target   24M;
        #        c-min-rate      80M;
        #        c-max-rate      720M;
        #}

        net {
                cram-hmac-alg "sha1";
                # shared-secret "secret_string";
                allow-two-primaries;
                after-sb-0pri discard-zero-changes;
                after-sb-1pri discard-secondary;
                after-sb-2pri disconnect;

                #max-buffers  36k;
                #sndbuf-size  1024k;
                #rcvbuf-size  2048k;
        }

        on drbd-server01 {
                address 192.168.64.1:7789;

                volume 0 {
                        device minor 0;
                        meta-disk internal;
                        disk /dev/c/o;
                }
        }

        on drbd-server02 {
                address 192.168.64.2:7789;

                volume 0 {
                        device minor 0;
                        meta-disk internal;
                        disk /dev/c/o;
                }
        }
}
-------------- next part --------------
cluster:
	name = mdtxcluster
	heartbeat_mode = global
	node_count = 2

node:
	cluster = mdtxcluster
	number = 0
	ip_port = 7777
	ip_address = 192.168.64.1
	name = drbd-server01

node:
	cluster = mdtxcluster
	number = 1
	ip_port = 7777
	ip_address = 192.168.64.2
	name = drbd-server02


