New config is rejected :: caused by :: replSetReconfig should only be run on a writable PRIMARY. Current state REMOVED

Hi there,
When I try to add the 2nd node to the replica set, the error below is reported. Also, how do I reset the replica set?

When adding the secondary node

> rs.add("psmdb2")
{
        "topologyVersion" : {
                "processId" : ObjectId("61d5abf2404700b3e321b7b7"),
                "counter" : NumberLong(1)
        },
        "ok" : 0,
        "errmsg" : "New config is rejected :: caused by :: replSetReconfig should only be run on a writable PRIMARY. Current state REMOVED;",
        "code" : 10107,
        "codeName" : "NotWritablePrimary"
}

Checked the Replication Status

> rs.status()
{
        "ok" : 0,
        "errmsg" : "Our replica set config is invalid or we are not a member of it",
        "code" : 93,
        "codeName" : "InvalidReplicaSetConfig"
}
>

Current configuration

> rs.conf()
{
        "_id" : "psmdb-replica01",
        "version" : 3,
        "term" : 3,
        "members" : [
                {
                        "_id" : 0,
                        "host" : "192.168.0.1:27017",
                        "arbiterOnly" : false,
                        "buildIndexes" : true,
                        "hidden" : false,
                        "priority" : 1,
                        "tags" : {

                        },
                        "secondaryDelaySecs" : NumberLong(0),
                        "votes" : 1
                },
                {
                        "_id" : 1,
                        "host" : "psmdb2:27017",
                        "arbiterOnly" : false,
                        "buildIndexes" : true,
                        "hidden" : false,
                        "priority" : 1,
                        "tags" : {

                        },
                        "secondaryDelaySecs" : NumberLong(0),
                        "votes" : 1
                },
                {
                        "_id" : 2,
                        "host" : "psmdb3:27017",
                        "arbiterOnly" : false,
                        "buildIndexes" : true,
                        "hidden" : false,
                        "priority" : 1,
                        "tags" : {

                        },
                        "secondaryDelaySecs" : NumberLong(0),
                        "votes" : 1
                }
        ],
        "protocolVersion" : NumberLong(1),
        "writeConcernMajorityJournalDefault" : true,
        "settings" : {
                "chainingAllowed" : true,
                "heartbeatIntervalMillis" : 2000,
                "heartbeatTimeoutSecs" : 10,
                "electionTimeoutMillis" : 10000,
                "catchUpTimeoutMillis" : -1,
                "catchUpTakeoverDelayMillis" : 30000,
                "getLastErrorModes" : {

                },
                "getLastErrorDefaults" : {
                        "w" : 1,
                        "wtimeout" : 0
                },
                "replicaSetId" : ObjectId("61d3ece7b440de1916af7c8a")
        }
}
>

Hi @AneeshBabu,

From the outputs, it's not clear whether the node on which you are running rs.add() is the PRIMARY of that replica set.

How did you initialize the replica set? Have you run rs.initiate()?

Please send the output from the following command on that node:
db.isMaster()

Along with that, please attach the mongod.log from all nodes for further understanding.

Best,

Jean.
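
(For reference, a minimal sketch of the diagnostics requested above, run from the mongo shell on each node. It assumes a 5.0 shell, where db.hello() is the newer name for db.isMaster(); nothing here changes any state.)

> db.hello()                                         // or db.isMaster(); shows ismaster/secondary and any "info" about an invalid config
> rs.status()                                        // if a config is loaded, check "myState" and members[].stateStr
> db.getSiblingDB("local").system.replset.findOne()  // the replica set config stored on this node; its "_id" should match the replSetName this mongod was started with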


@jenunes
Yes, I am running the rs.add() command on the PRIMARY (node 1).
Below are the outputs …

Output of rs.initiate() at PRIMARY(node 1)

> rs.initiate()
{
        "ok" : 0,
        "errmsg" : "already initialized",
        "code" : 23,
        "codeName" : "AlreadyInitialized"
}

Output of db.isMaster() at PRIMARY(node 1)

> db.isMaster()
{
        "topologyVersion" : {
                "processId" : ObjectId("61d7dcfd577dc28a45c1a42b"),
                "counter" : NumberLong(1)
        },
        "ismaster" : false,
        "secondary" : false,
        "info" : "Does not have a valid replica set config",
        "isreplicaset" : true,
        "maxBsonObjectSize" : 16777216,
        "maxMessageSizeBytes" : 48000000,
        "maxWriteBatchSize" : 100000,
        "localTime" : ISODate("2022-01-07T18:08:00.696Z"),
        "logicalSessionTimeoutMinutes" : 30,
        "connectionId" : 25,
        "minWireVersion" : 0,
        "maxWireVersion" : 13,
        "readOnly" : false,
        "ok" : 1
}

mongod.log file => PRIMARY(node1)


{"t":{"$date":"2022-01-07T23:37:38.349+05:30"},"s":"I",  "c":"-",        "id":4939300, "ctx":"monitoring-keys-for-HMAC","msg":"Failed to refresh key cache","attr":{"error":"ReadConcernMajorityNotAvailableYet: Read concern majority reads are currently not possible.","nextWakeupMillis":129800}}
{"t":{"$date":"2022-01-07T23:38:10.529+05:30"},"s":"I",  "c":"STORAGE",  "id":22430,   "ctx":"Checkpointer","msg":"WiredTiger message","attr":{"message":"[1641578890:529494][18291:0x7f998eade700], WT_SESSION.checkpoint: [WT_VERB_CHECKPOINT_PROGRESS] saving checkpoint snapshot min: 1410, snapshot max: 1410 snapshot count: 0, oldest timestamp: (1641392816, 1) , meta checkpoint timestamp: (1641393116, 1) base write gen: 22933"}}
{"t":{"$date":"2022-01-07T23:39:10.534+05:30"},"s":"I",  "c":"STORAGE",  "id":22430,   "ctx":"Checkpointer","msg":"WiredTiger message","attr":{"message":"[1641578950:534956][18291:0x7f998eade700], WT_SESSION.checkpoint: [WT_VERB_CHECKPOINT_PROGRESS] saving checkpoint snapshot min: 1412, snapshot max: 1412 snapshot count: 0, oldest timestamp: (1641392816, 1) , meta checkpoint timestamp: (1641393116, 1) base write gen: 22933"}}
{"t":{"$date":"2022-01-07T23:39:48.244+05:30"},"s":"I",  "c":"-",        "id":4939300, "ctx":"monitoring-keys-for-HMAC","msg":"Failed to refresh key cache","attr":{"error":"ReadConcernMajorityNotAvailableYet: Read concern majority reads are currently not possible.","nextWakeupMillis":130000}}
{"t":{"$date":"2022-01-07T23:40:10.539+05:30"},"s":"I",  "c":"STORAGE",  "id":22430,   "ctx":"Checkpointer","msg":"WiredTiger message","attr":{"message":"[1641579010:539448][18291:0x7f998eade700], WT_SESSION.checkpoint: [WT_VERB_CHECKPOINT_PROGRESS] saving checkpoint snapshot min: 1414, snapshot max: 1414 snapshot count: 0, oldest timestamp: (1641392816, 1) , meta checkpoint timestamp: (1641393116, 1) base write gen: 22933"}}
{"t":{"$date":"2022-01-07T23:40:45.808+05:30"},"s":"I",  "c":"COMMAND",  "id":21577,   "ctx":"conn25","msg":"Initiate: no configuration specified. Using a default configuration for the set"}
{"t":{"$date":"2022-01-07T23:40:45.808+05:30"},"s":"I",  "c":"COMMAND",  "id":21578,   "ctx":"conn25","msg":"Created configuration for initiation","attr":{"config":"{ _id: \"psmdb1\", version: 1, members: [ { _id: 0, host: \"192.168.0.1:27017\" } ] }"}}
{"t":{"$date":"2022-01-07T23:40:45.808+05:30"},"s":"I",  "c":"REPL",     "id":21356,   "ctx":"conn25","msg":"replSetInitiate admin command received from client"}
{"t":{"$date":"2022-01-07T23:41:10.545+05:30"},"s":"I",  "c":"STORAGE",  "id":22430,   "ctx":"Checkpointer","msg":"WiredTiger message","attr":{"message":"[1641579070:545387][18291:0x7f998eade700], WT_SESSION.checkpoint: [WT_VERB_CHECKPOINT_PROGRESS] saving checkpoint snapshot min: 1416, snapshot max: 1416 snapshot count: 0, oldest timestamp: (1641392816, 1) , meta checkpoint timestamp: (1641393116, 1) base write gen: 22933"}}
{"t":{"$date":"2022-01-07T23:41:21.692+05:30"},"s":"I",  "c":"NETWORK",  "id":4333208, "ctx":"ReplicaSetMonitor-TaskExecutor","msg":"RSM host selection timeout","attr":{"replicaSet":"psmdb-replica01","error":"FailedToSatisfyReadPreference: Could not find host matching read preference { mode: \"primary\" } for set psmdb-replica01"}}
{"t":{"$date":"2022-01-07T23:41:21.692+05:30"},"s":"I",  "c":"CONTROL",  "id":20714,   "ctx":"LogicalSessionCacheRefresh","msg":"Failed to refresh session cache, will try again at the next refresh interval","attr":{"error":"FailedToSatisfyReadPreference: Could not find host matching read preference { mode: \"primary\" } for set psmdb-replica01"}}
{"t":{"$date":"2022-01-07T23:41:21.693+05:30"},"s":"I",  "c":"NETWORK",  "id":4333208, "ctx":"ReplicaSetMonitor-TaskExecutor","msg":"RSM host selection timeout","attr":{"replicaSet":"psmdb-replica01","error":"FailedToSatisfyReadPreference: Could not find host matching read preference { mode: \"primary\" } for set psmdb-replica01"}}
{"t":{"$date":"2022-01-07T23:41:21.693+05:30"},"s":"I",  "c":"CONTROL",  "id":20711,   "ctx":"LogicalSessionCacheReap","msg":"Failed to reap transaction table","attr":{"error":"FailedToSatisfyReadPreference: Could not find host matching read preference { mode: \"primary\" } for set psmdb-replica01"}}
{"t":{"$date":"2022-01-07T23:41:58.335+05:30"},"s":"I",  "c":"-",        "id":4939300, "ctx":"monitoring-keys-for-HMAC","msg":"Failed to refresh key cache","attr":{"error":"ReadConcernMajorityNotAvailableYet: Read concern majority reads are currently not possible.","nextWakeupMillis":130200}}
{"t":{"$date":"2022-01-07T23:42:10.550+05:30"},"s":"I",  "c":"STORAGE",  "id":22430,   "ctx":"Checkpointer","msg":"WiredTiger message","attr":{"message":"[1641579130:550719][18291:0x7f998eade700], WT_SESSION.checkpoint: [WT_VERB_CHECKPOINT_PROGRESS] saving checkpoint snapshot min: 1418, snapshot max: 1418 snapshot count: 0, oldest timestamp: (1641392816, 1) , meta checkpoint timestamp: (1641393116, 1) base write gen: 22933"}}
{"t":{"$date":"2022-01-07T23:43:10.556+05:30"},"s":"I",  "c":"STORAGE",  "id":22430,   "ctx":"Checkpointer","msg":"WiredTiger message","attr":{"message":"[1641579190:556364][18291:0x7f998eade700], WT_SESSION.checkpoint: [WT_VERB_CHECKPOINT_PROGRESS] saving checkpoint snapshot min: 1420, snapshot max: 1420 snapshot count: 0, oldest timestamp: (1641392816, 1) , meta checkpoint timestamp: (1641393116, 1) base write gen: 22933"}}
{"t":{"$date":"2022-01-07T23:44:08.598+05:30"},"s":"I",  "c":"-",        "id":4939300, "ctx":"monitoring-keys-for-HMAC","msg":"Failed to refresh key cache","attr":{"error":"ReadConcernMajorityNotAvailableYet: Read concern majority reads are currently not possible.","nextWakeupMillis":130400}}
{"t":{"$date":"2022-01-07T23:44:10.562+05:30"},"s":"I",  "c":"STORAGE",  "id":22430,   "ctx":"Checkpointer","msg":"WiredTiger message","attr":{"message":"[1641579250:562517][18291:0x7f998eade700], WT_SESSION.checkpoint: [WT_VERB_CHECKPOINT_PROGRESS] saving checkpoint snapshot min: 1422, snapshot max: 1422 snapshot count: 0, oldest timestamp: (1641392816, 1) , meta checkpoint timestamp: (1641393116, 1) base write gen: 22933"}}
{"t":{"$date":"2022-01-07T23:45:10.567+05:30"},"s":"I",  "c":"STORAGE",  "id":22430,   "ctx":"Checkpointer","msg":"WiredTiger message","attr":{"message":"[1641579310:567634][18291:0x7f998eade700], WT_SESSION.checkpoint: [WT_VERB_CHECKPOINT_PROGRESS] saving checkpoint snapshot min: 1424, snapshot max: 1424 snapshot count: 0, oldest timestamp: (1641392816, 1) , meta checkpoint timestamp: (1641393116, 1) base write gen: 22933"}}
{"t":{"$date":"2022-01-07T23:46:10.573+05:30"},"s":"I",  "c":"STORAGE",  "id":22430,   "ctx":"Checkpointer","msg":"WiredTiger message","attr":{"message":"[1641579370:573399][18291:0x7f998eade700], WT_SESSION.checkpoint: [WT_VERB_CHECKPOINT_PROGRESS] saving checkpoint snapshot min: 1426, snapshot max: 1426 snapshot count: 0, oldest timestamp: (1641392816, 1) , meta checkpoint timestamp: (1641393116, 1) base write gen: 22933"}}
{"t":{"$date":"2022-01-07T23:46:19.098+05:30"},"s":"I",  "c":"-",        "id":4939300, "ctx":"monitoring-keys-for-HMAC","msg":"Failed to refresh key cache","attr":{"error":"ReadConcernMajorityNotAvailableYet: Read concern majority reads are currently not possible.","nextWakeupMillis":130600}}
{"t":{"$date":"2022-01-07T23:46:21.692+05:30"},"s":"I",  "c":"NETWORK",  "id":4333208, "ctx":"ReplicaSetMonitor-TaskExecutor","msg":"RSM host selection timeout","attr":{"replicaSet":"psmdb-replica01","error":"FailedToSatisfyReadPreference: Could not find host matching read preference { mode: \"primary\" } for set psmdb-replica01"}}
{"t":{"$date":"2022-01-07T23:46:21.692+05:30"},"s":"I",  "c":"CONTROL",  "id":20714,   "ctx":"LogicalSessionCacheRefresh","msg":"Failed to refresh session cache, will try again at the next refresh interval","attr":{"error":"FailedToSatisfyReadPreference: Could not find host matching read preference { mode: \"primary\" } for set psmdb-replica01"}}
{"t":{"$date":"2022-01-07T23:46:21.693+05:30"},"s":"I",  "c":"NETWORK",  "id":4333208, "ctx":"ReplicaSetMonitor-TaskExecutor","msg":"RSM host selection timeout","attr":{"replicaSet":"psmdb-replica01","error":"FailedToSatisfyReadPreference: Could not find host matching read preference { mode: \"primary\" } for set psmdb-replica01"}}
{"t":{"$date":"2022-01-07T23:46:21.693+05:30"},"s":"I",  "c":"CONTROL",  "id":20711,   "ctx":"LogicalSessionCacheReap","msg":"Failed to reap transaction table","attr":{"error":"FailedToSatisfyReadPreference: Could not find host matching read preference { mode: \"primary\" } for set psmdb-replica01"}}
{"t":{"$date":"2022-01-07T23:47:10.578+05:30"},"s":"I",  "c":"STORAGE",  "id":22430,   "ctx":"Checkpointer","msg":"WiredTiger message","attr":{"message":"[1641579430:578126][18291:0x7f998eade700], WT_SESSION.checkpoint: [WT_VERB_CHECKPOINT_PROGRESS] saving checkpoint snapshot min: 1428, snapshot max: 1428 snapshot count: 0, oldest timestamp: (1641392816, 1) , meta checkpoint timestamp: (1641393116, 1) base write gen: 22933"}}
{"t":{"$date":"2022-01-07T23:48:10.584+05:30"},"s":"I",  "c":"STORAGE",  "id":22430,   "ctx":"Checkpointer","msg":"WiredTiger message","attr":{"message":"[1641579490:584029][18291:0x7f998eade700], WT_SESSION.checkpoint: [WT_VERB_CHECKPOINT_PROGRESS] saving checkpoint snapshot min: 1430, snapshot max: 1430 snapshot count: 0, oldest timestamp: (1641392816, 1) , meta checkpoint timestamp: (1641393116, 1) base write gen: 22933"}}
{"t":{"$date":"2022-01-07T23:48:29.780+05:30"},"s":"I",  "c":"-",        "id":4939300, "ctx":"monitoring-keys-for-HMAC","msg":"Failed to refresh key cache","attr":{"error":"ReadConcernMajorityNotAvailableYet: Read concern majority reads are currently not possible.","nextWakeupMillis":130800}}
{"t":{"$date":"2022-01-07T23:49:10.588+05:30"},"s":"I",  "c":"STORAGE",  "id":22430,   "ctx":"Checkpointer","msg":"WiredTiger message","attr":{"message":"[1641579550:588587][18291:0x7f998eade700], WT_SESSION.checkpoint: [WT_VERB_CHECKPOINT_PROGRESS] saving checkpoint snapshot min: 1432, snapshot max: 1432 snapshot count: 0, oldest timestamp: (1641392816, 1) , meta checkpoint timestamp: (1641393116, 1) base write gen: 22933"}}
{"t":{"$date":"2022-01-07T23:50:10.593+05:30"},"s":"I",  "c":"STORAGE",  "id":22430,   "ctx":"Checkpointer","msg":"WiredTiger message","attr":{"message":"[1641579610:593774][18291:0x7f998eade700], WT_SESSION.checkpoint: [WT_VERB_CHECKPOINT_PROGRESS] saving checkpoint snapshot min: 1434, snapshot max: 1434 snapshot count: 0, oldest timestamp: (1641392816, 1) , meta checkpoint timestamp: (1641393116, 1) base write gen: 22933"}}
{"t":{"$date":"2022-01-07T23:50:40.678+05:30"},"s":"I",  "c":"-",        "id":4939300, "ctx":"monitoring-keys-for-HMAC","msg":"Failed to refresh key cache","attr":{"error":"ReadConcernMajorityNotAvailableYet: Read concern majority reads are currently not possible.","nextWakeupMillis":131000}}
{"t":{"$date":"2022-01-07T23:51:10.599+05:30"},"s":"I",  "c":"STORAGE",  "id":22430,   "ctx":"Checkpointer","msg":"WiredTiger message","attr":{"message":"[1641579670:599075][18291:0x7f998eade700], WT_SESSION.checkpoint: [WT_VERB_CHECKPOINT_PROGRESS] saving checkpoint snapshot min: 1436, snapshot max: 1436 snapshot count: 0, oldest timestamp: (1641392816, 1) , meta checkpoint timestamp: (1641393116, 1) base write gen: 22933"}}
{"t":{"$date":"2022-01-07T23:51:21.692+05:30"},"s":"I",  "c":"NETWORK",  "id":4333208, "ctx":"ReplicaSetMonitor-TaskExecutor","msg":"RSM host selection timeout","attr":{"replicaSet":"psmdb-replica01","error":"FailedToSatisfyReadPreference: Could not find host matching read preference { mode: \"primary\" } for set psmdb-replica01"}}
{"t":{"$date":"2022-01-07T23:51:21.692+05:30"},"s":"I",  "c":"CONTROL",  "id":20714,   "ctx":"LogicalSessionCacheRefresh","msg":"Failed to refresh session cache, will try again at the next refresh interval","attr":{"error":"FailedToSatisfyReadPreference: Could not find host matching read preference { mode: \"primary\" } for set psmdb-replica01"}}
{"t":{"$date":"2022-01-07T23:51:21.693+05:30"},"s":"I",  "c":"NETWORK",  "id":4333208, "ctx":"ReplicaSetMonitor-TaskExecutor","msg":"RSM host selection timeout","attr":{"replicaSet":"psmdb-replica01","error":"FailedToSatisfyReadPreference: Could not find host matching read preference { mode: \"primary\" } for set psmdb-replica01"}}
{"t":{"$date":"2022-01-07T23:51:21.693+05:30"},"s":"I",  "c":"CONTROL",  "id":20711,   "ctx":"LogicalSessionCacheReap","msg":"Failed to reap transaction table","attr":{"error":"FailedToSatisfyReadPreference: Could not find host matching read preference { mode: \"primary\" } for set psmdb-replica01"}}
{"t":{"$date":"2022-01-07T23:52:10.606+05:30"},"s":"I",  "c":"STORAGE",  "id":22430,   "ctx":"Checkpointer","msg":"WiredTiger message","attr":{"message":"[1641579730:606706][18291:0x7f998eade700], WT_SESSION.checkpoint: [WT_VERB_CHECKPOINT_PROGRESS] saving checkpoint snapshot min: 1438, snapshot max: 1438 snapshot count: 0, oldest timestamp: (1641392816, 1) , meta checkpoint timestamp: (1641393116, 1) base write gen: 22933"}}
{"t":{"$date":"2022-01-07T23:52:51.777+05:30"},"s":"I",  "c":"-",        "id":4939300, "ctx":"monitoring-keys-for-HMAC","msg":"Failed to refresh key cache","attr":{"error":"ReadConcernMajorityNotAvailableYet: Read concern majority reads are currently not possible.","nextWakeupMillis":131200}}
{"t":{"$date":"2022-01-07T23:53:10.612+05:30"},"s":"I",  "c":"STORAGE",  "id":22430,   "ctx":"Checkpointer","msg":"WiredTiger message","attr":{"message":"[1641579790:612253][18291:0x7f998eade700], WT_SESSION.checkpoint: [WT_VERB_CHECKPOINT_PROGRESS] saving checkpoint snapshot min: 1440, snapshot max: 1440 snapshot count: 0, oldest timestamp: (1641392816, 1) , meta checkpoint timestamp: (1641393116, 1) base write gen: 22933"}}
{"t":{"$date":"2022-01-07T23:54:10.617+05:30"},"s":"I",  "c":"STORAGE",  "id":22430,   "ctx":"Checkpointer","msg":"WiredTiger message","attr":{"message":"[1641579850:617335][18291:0x7f998eade700], WT_SESSION.checkpoint: [WT_VERB_CHECKPOINT_PROGRESS] saving checkpoint snapshot min: 1442, snapshot max: 1442 snapshot count: 0, oldest timestamp: (1641392816, 1) , meta checkpoint timestamp: (1641393116, 1) base write gen: 22933"}}
{"t":{"$date":"2022-01-07T23:55:03.076+05:30"},"s":"I",  "c":"-",        "id":4939300, "ctx":"monitoring-keys-for-HMAC","msg":"Failed to refresh key cache","attr":{"error":"ReadConcernMajorityNotAvailableYet: Read concern majority reads are currently not possible.","nextWakeupMillis":131400}}
{"t":{"$date":"2022-01-07T23:55:10.622+05:30"},"s":"I",  "c":"STORAGE",  "id":22430,   "ctx":"Checkpointer","msg":"WiredTiger message","attr":{"message":"[1641579910:622885][18291:0x7f998eade700], WT_SESSION.checkpoint: [WT_VERB_CHECKPOINT_PROGRESS] saving checkpoint snapshot min: 1444, snapshot max: 1444 snapshot count: 0, oldest timestamp: (1641392816, 1) , meta checkpoint timestamp: (1641393116, 1) base write gen: 22933"}}
{"t":{"$date":"2022-01-07T23:56:10.628+05:30"},"s":"I",  "c":"STORAGE",  "id":22430,   "ctx":"Checkpointer","msg":"WiredTiger message","attr":{"message":"[1641579970:628067][18291:0x7f998eade700], WT_SESSION.checkpoint: [WT_VERB_CHECKPOINT_PROGRESS] saving checkpoint snapshot min: 1446, snapshot max: 1446 snapshot count: 0, oldest timestamp: (1641392816, 1) , meta checkpoint timestamp: (1641393116, 1) base write gen: 22933"}}
{"t":{"$date":"2022-01-07T23:56:21.692+05:30"},"s":"I",  "c":"NETWORK",  "id":4333208, "ctx":"ReplicaSetMonitor-TaskExecutor","msg":"RSM host selection timeout","attr":{"replicaSet":"psmdb-replica01","error":"FailedToSatisfyReadPreference: Could not find host matching read preference { mode: \"primary\" } for set psmdb-replica01"}}
{"t":{"$date":"2022-01-07T23:56:21.692+05:30"},"s":"I",  "c":"CONTROL",  "id":20714,   "ctx":"LogicalSessionCacheRefresh","msg":"Failed to refresh session cache, will try again at the next refresh interval","attr":{"error":"FailedToSatisfyReadPreference: Could not find host matching read preference { mode: \"primary\" } for set psmdb-replica01"}}
{"t":{"$date":"2022-01-07T23:56:21.693+05:30"},"s":"I",  "c":"NETWORK",  "id":4333208, "ctx":"ReplicaSetMonitor-TaskExecutor","msg":"RSM host selection timeout","attr":{"replicaSet":"psmdb-replica01","error":"FailedToSatisfyReadPreference: Could not find host matching read preference { mode: \"primary\" } for set psmdb-replica01"}}
{"t":{"$date":"2022-01-07T23:56:21.693+05:30"},"s":"I",  "c":"CONTROL",  "id":20711,   "ctx":"LogicalSessionCacheReap","msg":"Failed to reap transaction table","attr":{"error":"FailedToSatisfyReadPreference: Could not find host matching read preference { mode: \"primary\" } for set psmdb-replica01"}}
{"t":{"$date":"2022-01-07T23:56:53.274+05:30"},"s":"I",  "c":"NETWORK",  "id":22944,   "ctx":"conn25","msg":"Connection ended","attr":{"remote":"192.168.0.1:38996","uuid":"04776544-4068-4c7c-8eef-29d10bfbf2e2","connectionId":25,"connectionCount":2}}
{"t":{"$date":"2022-01-07T23:57:10.634+05:30"},"s":"I",  "c":"STORAGE",  "id":22430,   "ctx":"Checkpointer","msg":"WiredTiger message","attr":{"message":"[1641580030:634882][18291:0x7f998eade700], WT_SESSION.checkpoint: [WT_VERB_CHECKPOINT_PROGRESS] saving checkpoint snapshot min: 1448, snapshot max: 1448 snapshot count: 0, oldest timestamp: (1641392816, 1) , meta checkpoint timestamp: (1641393116, 1) base write gen: 22933"}}
{"t":{"$date":"2022-01-07T23:57:14.572+05:30"},"s":"I",  "c":"-",        "id":4939300, "ctx":"monitoring-keys-for-HMAC","msg":"Failed to refresh key cache","attr":{"error":"ReadConcernMajorityNotAvailableYet: Read concern majority reads are currently not possible.","nextWakeupMillis":131600}}

mongod.log file => SECONDARY(node2)

{"t":{"$date":"2022-01-07T23:36:13.633+05:30"},"s":"I",  "c":"STORAGE",  "id":22430,   "ctx":"Checkpointer","msg":"WiredTiger message","attr":{"message":"[1641578773:633575][1563:0x7fb25b6fb700], WT_SESSION.checkpoint: [WT_VERB_CHECKPOINT_PROGRESS] saving checkpoint snapshot min: 6188, snapshot max: 6188 snapshot count: 0, oldest timestamp: (0, 0) , meta checkpoint timestamp: (0, 0) base write gen: 10631"}}
{"t":{"$date":"2022-01-07T23:37:13.642+05:30"},"s":"I",  "c":"STORAGE",  "id":22430,   "ctx":"Checkpointer","msg":"WiredTiger message","attr":{"message":"[1641578833:642631][1563:0x7fb25b6fb700], WT_SESSION.checkpoint: [WT_VERB_CHECKPOINT_PROGRESS] saving checkpoint snapshot min: 6190, snapshot max: 6190 snapshot count: 0, oldest timestamp: (0, 0) , meta checkpoint timestamp: (0, 0) base write gen: 10631"}}
{"t":{"$date":"2022-01-07T23:37:49.968+05:30"},"s":"I",  "c":"CONTROL",  "id":20714,   "ctx":"LogicalSessionCacheRefresh","msg":"Failed to refresh session cache, will try again at the next refresh interval","attr":{"error":"NotYetInitialized: Replication has not yet been configured"}}
{"t":{"$date":"2022-01-07T23:37:49.968+05:30"},"s":"I",  "c":"CONTROL",  "id":20711,   "ctx":"LogicalSessionCacheReap","msg":"Failed to reap transaction table","attr":{"error":"NotYetInitialized: Replication has not yet been configured"}}
{"t":{"$date":"2022-01-07T23:38:13.650+05:30"},"s":"I",  "c":"STORAGE",  "id":22430,   "ctx":"Checkpointer","msg":"WiredTiger message","attr":{"message":"[1641578893:650656][1563:0x7fb25b6fb700], WT_SESSION.checkpoint: [WT_VERB_CHECKPOINT_PROGRESS] saving checkpoint snapshot min: 6192, snapshot max: 6192 snapshot count: 0, oldest timestamp: (0, 0) , meta checkpoint timestamp: (0, 0) base write gen: 10631"}}
{"t":{"$date":"2022-01-07T23:38:33.785+05:30"},"s":"I",  "c":"-",        "id":4939300, "ctx":"monitoring-keys-for-HMAC","msg":"Failed to refresh key cache","attr":{"error":"NotYetInitialized: Cannot use non-local read concern until replica set is finished initializing.","nextWakeupMillis":272600}}
{"t":{"$date":"2022-01-07T23:39:13.658+05:30"},"s":"I",  "c":"STORAGE",  "id":22430,   "ctx":"Checkpointer","msg":"WiredTiger message","attr":{"message":"[1641578953:658946][1563:0x7fb25b6fb700], WT_SESSION.checkpoint: [WT_VERB_CHECKPOINT_PROGRESS] saving checkpoint snapshot min: 6194, snapshot max: 6194 snapshot count: 0, oldest timestamp: (0, 0) , meta checkpoint timestamp: (0, 0) base write gen: 10631"}}
{"t":{"$date":"2022-01-07T23:40:13.664+05:30"},"s":"I",  "c":"STORAGE",  "id":22430,   "ctx":"Checkpointer","msg":"WiredTiger message","attr":{"message":"[1641579013:664673][1563:0x7fb25b6fb700], WT_SESSION.checkpoint: [WT_VERB_CHECKPOINT_PROGRESS] saving checkpoint snapshot min: 6196, snapshot max: 6196 snapshot count: 0, oldest timestamp: (0, 0) , meta checkpoint timestamp: (0, 0) base write gen: 10631"}}
{"t":{"$date":"2022-01-07T23:41:13.669+05:30"},"s":"I",  "c":"STORAGE",  "id":22430,   "ctx":"Checkpointer","msg":"WiredTiger message","attr":{"message":"[1641579073:669126][1563:0x7fb25b6fb700], WT_SESSION.checkpoint: [WT_VERB_CHECKPOINT_PROGRESS] saving checkpoint snapshot min: 6198, snapshot max: 6198 snapshot count: 0, oldest timestamp: (0, 0) , meta checkpoint timestamp: (0, 0) base write gen: 10631"}}
{"t":{"$date":"2022-01-07T23:42:13.676+05:30"},"s":"I",  "c":"STORAGE",  "id":22430,   "ctx":"Checkpointer","msg":"WiredTiger message","attr":{"message":"[1641579133:676640][1563:0x7fb25b6fb700], WT_SESSION.checkpoint: [WT_VERB_CHECKPOINT_PROGRESS] saving checkpoint snapshot min: 6200, snapshot max: 6200 snapshot count: 0, oldest timestamp: (0, 0) , meta checkpoint timestamp: (0, 0) base write gen: 10631"}}
{"t":{"$date":"2022-01-07T23:42:49.968+05:30"},"s":"I",  "c":"CONTROL",  "id":20714,   "ctx":"LogicalSessionCacheRefresh","msg":"Failed to refresh session cache, will try again at the next refresh interval","attr":{"error":"NotYetInitialized: Replication has not yet been configured"}}
{"t":{"$date":"2022-01-07T23:42:49.968+05:30"},"s":"I",  "c":"CONTROL",  "id":20711,   "ctx":"LogicalSessionCacheReap","msg":"Failed to reap transaction table","attr":{"error":"NotYetInitialized: Replication has not yet been configured"}}
{"t":{"$date":"2022-01-07T23:43:06.485+05:30"},"s":"I",  "c":"-",        "id":4939300, "ctx":"monitoring-keys-for-HMAC","msg":"Failed to refresh key cache","attr":{"error":"NotYetInitialized: Cannot use non-local read concern until replica set is finished initializing.","nextWakeupMillis":272800}}
{"t":{"$date":"2022-01-07T23:43:13.684+05:30"},"s":"I",  "c":"STORAGE",  "id":22430,   "ctx":"Checkpointer","msg":"WiredTiger message","attr":{"message":"[1641579193:684659][1563:0x7fb25b6fb700], WT_SESSION.checkpoint: [WT_VERB_CHECKPOINT_PROGRESS] saving checkpoint snapshot min: 6202, snapshot max: 6202 snapshot count: 0, oldest timestamp: (0, 0) , meta checkpoint timestamp: (0, 0) base write gen: 10631"}}

mongod.log file => SECONDARY(node3)

{"t":{"$date":"2022-01-07T23:43:32.183+05:30"},"s":"I",  "c":"CONTROL",  "id":20714,   "ctx":"LogicalSessionCacheRefresh","msg":"Failed to refresh session cache, will try again at the next refresh interval","attr":{"error":"NotYetInitialized: Replication has not yet been configured"}}
{"t":{"$date":"2022-01-07T23:43:32.184+05:30"},"s":"I",  "c":"CONTROL",  "id":20711,   "ctx":"LogicalSessionCacheReap","msg":"Failed to reap transaction table","attr":{"error":"NotYetInitialized: Replication has not yet been configured"}}
{"t":{"$date":"2022-01-07T23:43:47.729+05:30"},"s":"I",  "c":"-",        "id":4939300, "ctx":"monitoring-keys-for-HMAC","msg":"Failed to refresh key cache","attr":{"error":"NotYetInitialized: Cannot use non-local read concern until replica set is finished initializing.","nextWakeupMillis":272800}}
{"t":{"$date":"2022-01-07T23:43:48.170+05:30"},"s":"I",  "c":"STORAGE",  "id":22430,   "ctx":"Checkpointer","msg":"WiredTiger message","attr":{"message":"[1641579228:170155][8534:0x7fc3eca31700], WT_SESSION.checkpoint: [WT_VERB_CHECKPOINT_PROGRESS] saving checkpoint snapshot min: 6202, snapshot max: 6202 snapshot count: 0, oldest timestamp: (0, 0) , meta checkpoint timestamp: (0, 0) base write gen: 13190"}}
{"t":{"$date":"2022-01-07T23:44:48.176+05:30"},"s":"I",  "c":"STORAGE",  "id":22430,   "ctx":"Checkpointer","msg":"WiredTiger message","attr":{"message":"[1641579288:176111][8534:0x7fc3eca31700], WT_SESSION.checkpoint: [WT_VERB_CHECKPOINT_PROGRESS] saving checkpoint snapshot min: 6204, snapshot max: 6204 snapshot count: 0, oldest timestamp: (0, 0) , meta checkpoint timestamp: (0, 0) base write gen: 13190"}}
{"t":{"$date":"2022-01-07T23:45:48.180+05:30"},"s":"I",  "c":"STORAGE",  "id":22430,   "ctx":"Checkpointer","msg":"WiredTiger message","attr":{"message":"[1641579348:180475][8534:0x7fc3eca31700], WT_SESSION.checkpoint: [WT_VERB_CHECKPOINT_PROGRESS] saving checkpoint snapshot min: 6206, snapshot max: 6206 snapshot count: 0, oldest timestamp: (0, 0) , meta checkpoint timestamp: (0, 0) base write gen: 13190"}}
{"t":{"$date":"2022-01-07T23:46:48.186+05:30"},"s":"I",  "c":"STORAGE",  "id":22430,   "ctx":"Checkpointer","msg":"WiredTiger message","attr":{"message":"[1641579408:186563][8534:0x7fc3eca31700], WT_SESSION.checkpoint: [WT_VERB_CHECKPOINT_PROGRESS] saving checkpoint snapshot min: 6208, snapshot max: 6208 snapshot count: 0, oldest timestamp: (0, 0) , meta checkpoint timestamp: (0, 0) base write gen: 13190"}}
{"t":{"$date":"2022-01-07T23:47:48.192+05:30"},"s":"I",  "c":"STORAGE",  "id":22430,   "ctx":"Checkpointer","msg":"WiredTiger message","attr":{"message":"[1641579468:192364][8534:0x7fc3eca31700], WT_SESSION.checkpoint: [WT_VERB_CHECKPOINT_PROGRESS] saving checkpoint snapshot min: 6210, snapshot max: 6210 snapshot count: 0, oldest timestamp: (0, 0) , meta checkpoint timestamp: (0, 0) base write gen: 13190"}}
{"t":{"$date":"2022-01-07T23:48:20.628+05:30"},"s":"I",  "c":"-",        "id":4939300, "ctx":"monitoring-keys-for-HMAC","msg":"Failed to refresh key cache","attr":{"error":"NotYetInitialized: Cannot use non-local read concern until replica set is finished initializing.","nextWakeupMillis":273000}}
{"t":{"$date":"2022-01-07T23:48:32.183+05:30"},"s":"I",  "c":"CONTROL",  "id":20714,   "ctx":"LogicalSessionCacheRefresh","msg":"Failed to refresh session cache, will try again at the next refresh interval","attr":{"error":"NotYetInitialized: Replication has not yet been configured"}}
{"t":{"$date":"2022-01-07T23:48:32.184+05:30"},"s":"I",  "c":"CONTROL",  "id":20711,   "ctx":"LogicalSessionCacheReap","msg":"Failed to reap transaction table","attr":{"error":"NotYetInitialized: Replication has not yet been configured"}}
{"t":{"$date":"2022-01-07T23:48:48.197+05:30"},"s":"I",  "c":"STORAGE",  "id":22430,   "ctx":"Checkpointer","msg":"WiredTiger message","attr":{"message":"[1641579528:197494][8534:0x7fc3eca31700], WT_SESSION.checkpoint: [WT_VERB_CHECKPOINT_PROGRESS] saving checkpoint snapshot min: 6212, snapshot max: 6212 snapshot count: 0, oldest timestamp: (0, 0) , meta checkpoint timestamp: (0, 0) base write gen: 13190"}}
{"t":{"$date":"2022-01-07T23:49:48.202+05:30"},"s":"I",  "c":"STORAGE",  "id":22430,   "ctx":"Checkpointer","msg":"WiredTiger message","attr":{"message":"[1641579588:202120][8534:0x7fc3eca31700], WT_SESSION.checkpoint: [WT_VERB_CHECKPOINT_PROGRESS] saving checkpoint snapshot min: 6214, snapshot max: 6214 snapshot count: 0, oldest timestamp: (0, 0) , meta checkpoint timestamp: (0, 0) base write gen: 13190"}}
{"t":{"$date":"2022-01-07T23:50:48.207+05:30"},"s":"I",  "c":"STORAGE",  "id":22430,   "ctx":"Checkpointer","msg":"WiredTiger message","attr":{"message":"[1641579648:207631][8534:0x7fc3eca31700], WT_SESSION.checkpoint: [WT_VERB_CHECKPOINT_PROGRESS] saving checkpoint snapshot min: 6216, snapshot max: 6216 snapshot count: 0, oldest timestamp: (0, 0) , meta checkpoint timestamp: (0, 0) base write gen: 13190"}}
{"t":{"$date":"2022-01-07T23:51:48.213+05:30"},"s":"I",  "c":"STORAGE",  "id":22430,   "ctx":"Checkpointer","msg":"WiredTiger message","attr":{"message":"[1641579708:213172][8534:0x7fc3eca31700], WT_SESSION.checkpoint: [WT_VERB_CHECKPOINT_PROGRESS] saving checkpoint snapshot min: 6218, snapshot max: 6218 snapshot count: 0, oldest timestamp: (0, 0) , meta checkpoint timestamp: (0, 0) base write gen: 13190"}}
{"t":{"$date":"2022-01-07T23:52:48.218+05:30"},"s":"I",  "c":"STORAGE",  "id":22430,   "ctx":"Checkpointer","msg":"WiredTiger message","attr":{"message":"[1641579768:218090][8534:0x7fc3eca31700], WT_SESSION.checkpoint: [WT_VERB_CHECKPOINT_PROGRESS] saving checkpoint snapshot min: 6220, snapshot max: 6220 snapshot count: 0, oldest timestamp: (0, 0) , meta checkpoint timestamp: (0, 0) base write gen: 13190"}}

Having the same problem, how did you fix this?

We also encountered a similar issue when trying to add a new node to an existing replica set on version 5.0.
Can someone advise on this?
We upgraded MongoDB from 3.6 to 5.0 with the replica set in place (both primary and secondary were upgraded in parallel). After the upgrade completed successfully, I tried to add another new node to the existing replica set, but it fails with the errors below.

:PRIMARY> rs.add( { host: ":27017", priority: 0, votes:0 } )
assert.soon failed: function() {
var cfg = hostport;

    var local = db.getSiblingDB("local");
    assert(local.system.replset.count() <= 1,
           "error: local.system.replset has unexpected contents");
    var c = local.system.replset.findOne();
    assert(c, "no config object retrievable from local.system.replset");

    const attemptedVersion = c.version++;

    var max = 0;
    for (var i in c.members) {
        // Omit 'newlyAdded' field if it exists in the config.
        delete c.members[i].newlyAdded;
        if (c.members[i]._id > max)
            max = c.members[i]._id;
    }
    if (isString(hostport)) {
        cfg = {_id: max + 1, host: hostport};
        if (arb)
            cfg.arbiterOnly = true;
    } else if (arb == true) {
        throw Error(
            "Expected first parameter to be a host-and-port string of arbiter, but got " +
            tojson(hostport));
    }

    if (cfg._id == null) {
        cfg._id = max + 1;
    }
    c.members.push(cfg);

    res = self._runCmd({replSetReconfig: c});
    if (res === "") {
        // _runCmd caught an exception.
        return true;
    }
    if (res.ok) {
        return true;
    }
    if (res.code === ErrorCodes.ConfigurationInProgress ||
        res.code === ErrorCodes.CurrentConfigNotCommittedYet) {
        return false;  // keep retrying
    }
    if (res.code === ErrorCodes.NewReplicaSetConfigurationIncompatible) {
        // We will retry only if this error was due to our config version being too low.
        const cfgState = local.system.replset.findOne();
        if (cfgState.version >= attemptedVersion) {
            return false;  // keep retrying
        }
    }
    // Take no action on other errors.
    return true;
} : {
    "ok" : 0,
    "errmsg" : "Incompatible delay field name. If the node is in FCV 4.9, it must use secondaryDelaySecs.",
    "code" : 103,
    "codeName" : "NewReplicaSetConfigurationIncompatible",
    "$clusterTime" : {
            "clusterTime" : Timestamp(1708509633, 1),
            "signature" : {
                    "hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
                    "keyId" : NumberLong(0)
            }
    },
    "operationTime" : Timestamp(1708509633, 1)

} The hang analyzer is automatically called in assert.soon functions. If you are expecting assert.soon to possibly fail, call assert.soon with {runHangAnalyzer: false} as the fifth argument (you can fill unused arguments with undefined). Running hang analyzer from assert.soon.
Skipping runHangAnalyzer: no TestData (not running from resmoke)
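
In case it helps others hitting this: the errmsg points at the delay field name and FCV 4.9. As a hedged sketch only (the exact cause depends on how the 3.6 → 5.0 upgrade was carried out), these are things one could check on the PRIMARY before retrying rs.add(); the reconfig at the end is an assumption about a possible fix, not a confirmed procedure, so try it on a non-production set first.

// 1. Check the feature compatibility version; after a multi-step upgrade it may
//    still be on an intermediate value such as "4.4".
db.adminCommand({ getParameter: 1, featureCompatibilityVersion: 1 })

// 2. If it is still below "5.0" and the upgrade is meant to be final:
// db.adminCommand({ setFeatureCompatibilityVersion: "5.0" })

// 3. Inspect the stored config: a config carried over from pre-5.0 versions may
//    still use the old "slaveDelay" field, which a node on FCV 4.9+ rejects in
//    favour of "secondaryDelaySecs".
rs.conf().members

// 4. Hypothetical cleanup if members still carry slaveDelay while on FCV 4.9+/5.0:
var cfg = rs.conf();
cfg.members.forEach(function (m) {
    if (m.hasOwnProperty("slaveDelay")) {
        m.secondaryDelaySecs = m.slaveDelay;   // preserve the configured delay
        delete m.slaveDelay;
    }
});
rs.reconfig(cfg);

// 5. Then retry adding the new member, e.g.
// rs.add({ host: "<new-host>:27017", priority: 0, votes: 0 })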