• R/O
  • SSH

pm_logconv-cs: Commit

Pacemaker 対応ログメッセージ変換機能。

Heartbeat-2.1.4 用 hb-logconv(*) のPacemaker 1.0.x + Corosync スタック対応版。
(*) http://sourceforge.jp/projects/linux-ha/releases/?package_id=10282


Commit MetaInfo

Revision: 66fd3cf7d2916e8a3a7986da15c515e26cd95e0f (tree)
Time: 2013-06-03 18:26:38
Author: Yoshihiko SATO <satoyoshi@inte...>
Committer: Yoshihiko SATO

Log Message

コードコメントのログ出力例を最新化

Change Summary

Incremental Difference

diff -r a17af55091b7 -r 66fd3cf7d291 pm_logconv.conf
--- a/pm_logconv.conf Mon Jun 03 17:15:04 2013 +0900
+++ b/pm_logconv.conf Mon Jun 03 18:26:38 2013 +0900
@@ -157,7 +157,7 @@
157157 pattern=pengine,notice:,LogActions: Stop
158158 func=add_rsc_stop
159159
160-#MsgNo.F11-3, F11-5, F11-8, F11-9
160+#MsgNo.F11-3, F11-5, F11-8, F11-9, F11-11, F11-12
161161 #The message is not output immediately, output when F/O is complete.
162162 [Add no action]
163163 pattern_leave=pengine,info:,LogActions: Leave
@@ -166,7 +166,7 @@
166166 pattern_reload=pengine,notice:,LogActions: Reload
167167 func=add_no_action
168168
169-#MsgNo.F11-6
169+ #MsgNo.F11-6, F11-13, F11-14
170170 #The message is not output immediately, output when F/O is complete.
171171 [Add Resource move action]
172172 pattern_move=pengine,notice:,LogActions: Move
diff -r a17af55091b7 -r 66fd3cf7d291 pm_logconv.py
--- a/pm_logconv.py Mon Jun 03 17:15:04 2013 +0900
+++ b/pm_logconv.py Mon Jun 03 18:26:38 2013 +0900
@@ -2265,21 +2265,37 @@
22652265 NOTE: monitor operation is not a target.
22662266
22672267 MsgNo.1-1)
2268- Jan 6 14:16:27 x3650a crmd: [9874]: info: do_lrm_rsc_op: Performing key=17:2:0:dae9d86d-9c4b-44f2-822c-b559db044ba2 op=prmApPostgreSQLDB_start_0 )
2268+ Jan 1 00:00:00 node01 crmd[777]: info: do_lrm_rsc_op:
2269+ Performing key=11:1:0:00000000-0000-0000-0000-000000000000
2270+ op=prmRSC_start_0
22692271 MsgNo.2-1)
2270- Jan 6 15:05:00 x3650a crmd: [9874]: info: do_lrm_rsc_op: Performing key=20:7:0:dae9d86d-9c4b-44f2-822c-b559db044ba2 op=prmApPostgreSQLDB_stop_0 )
2272+ Jan 1 00:00:00 node01 crmd[777]: info: do_lrm_rsc_op:
2273+ Performing key=11:1:0:00000000-0000-0000-0000-000000000000
2274+ op=prmRSC_stop_0
22712275 MsgNo.4-1)
2272- Jan 12 18:34:51 x3650a crmd: [15901]: info: do_lrm_rsc_op: Performing key=32:13:0:9d68ec4b-527f-4dda-88b3-9203fef16f56 op=prmStateful:1_promote_0 )
2276+ Jan 1 00:00:00 node01 crmd[777]: info: do_lrm_rsc_op:
2277+ Performing key=11:1:0:00000000-0000-0000-0000-000000000000
2278+ op=prmRSC_promote_0
22732279 MsgNo.5-1)
2274- Jan 12 18:34:49 x3650a crmd: [3464]: info: do_lrm_rsc_op: Performing key=35:11:0:9d68ec4b-527f-4dda-88b3-9203fef16f56 op=prmStateful:0_demote_0 )
2280+ Jan 1 00:00:00 node01 crmd[777]: info: do_lrm_rsc_op:
2281+ Performing key=11:1:0:00000000-0000-0000-0000-000000000000
2282+ op=prmRSC_demote_0
22752283 MsgNo.11-1)
2276- Mar 24 11:17:59 x3650e crmd: [3181]: info: do_lrm_rsc_op: Performing key=9:68:0:13f2515c-f788-4cc4-b5a4-33308d617adc op=prmGuest-a1_migrate_to_0 )
2284+ Jan 1 00:00:00 node01 crmd[777]: info: do_lrm_rsc_op:
2285+ Performing key=11:1:0:00000000-0000-0000-0000-000000000000
2286+ op=prmRSC_migrate_to_0
22772287 MsgNo.12-1)
2278- Mar 24 11:18:14 x3650f crmd: [3356]: info: do_lrm_rsc_op: Performing key=10:68:0:13f2515c-f788-4cc4-b5a4-33308d617adc op=prmGuest-a1_migrate_from_0 )
2288+ Jan 1 00:00:00 node01 crmd[777]: info: do_lrm_rsc_op:
2289+ Performing key=11:1:0:00000000-0000-0000-0000-000000000000
2290+ op=prmRSC_migrate_from_0
22792291 MsgNo.17-1)
2280- Jan 7 10:21:41 x3650a crmd: [25493]: info: do_lrm_rsc_op: Performing key=35:1:0:683d57a3-6623-46ae-bbc9-6b7930aec9c2 op=prmStonith2-3_start_0 )
2292+ Jan 1 00:00:00 node01 crmd[777]: info: do_lrm_rsc_op:
2293+ Performing key=11:1:0:00000000-0000-0000-0000-000000000000
2294+ op=prmStonith1_start_0
22812295 MsgNo.18-1)
2282- Jan 7 10:22:11 x3650a crmd: [25493]: info: do_lrm_rsc_op: Performing key=30:5:0:683d57a3-6623-46ae-bbc9-6b7930aec9c2 op=prmStonith2-3_stop_0 )
2296+ Jan 1 00:00:00 node01 crmd[777]: info: do_lrm_rsc_op:
2297+ Performing key=11:1:0:00000000-0000-0000-0000-000000000000
2298+ op=prmStonith1_stop_0
22832299 '''
22842300 def try_to_operate(self, outputobj, logelm, lconvfrm):
22852301 try:
@@ -2305,21 +2321,37 @@
23052321 NOTE: monitor operation is not a target.
23062322
23072323 MsgNo.1-2)
2308- Jan 6 14:16:28 x3650a crmd: [9874]: info: process_lrm_event: LRM operation prmApPostgreSQLDB_start_0 (call=25, rc=0, cib-update=69, confirmed=true) ok
2324+ Jan 1 00:00:00 node01 crmd[777]: notice: process_lrm_event:
2325+ LRM operation prmRSC_start_0 (call=168, rc=0,
2326+ cib-update=67, confirmed=true) ok
23092327 MsgNo.2-2)
2310- Jan 6 15:05:01 x3650a crmd: [9874]: info: process_lrm_event: LRM operation prmApPostgreSQLDB_stop_0 (call=27, rc=0, cib-update=79, confirmed=true) ok
2328+ Jan 1 00:00:00 node01 crmd[777]: notice: process_lrm_event:
2329+ LRM operation prmRSC_stop_0 (call=168, rc=0,
2330+ cib-update=67, confirmed=true) ok
23112331 MsgNo.4-2)
2312- Jan 12 18:34:51 x3650a crmd: [15901]: info: process_lrm_event: LRM operation prmStateful:1_promote_0 (call=18, rc=0, cib-update=27, confirmed=true) ok
2332+ Jan 1 00:00:00 node01 crmd[777]: notice: process_lrm_event:
2333+ LRM operation prmRSC_promote_0 (call=132, rc=0,
2334+ cib-update=54, confirmed=true) ok
23132335 MsgNo.5-2)
2314- Jan 12 18:34:49 x3650a crmd: [3464]: info: process_lrm_event: LRM operation prmStateful:0_demote_0 (call=37, rc=0, cib-update=79, confirmed=true) ok
2336+ Jan 1 00:00:00 node01 crmd[777]: notice: process_lrm_event:
2337+ LRM operation prmRSC_demote_0 (call=132, rc=0,
2338+ cib-update=54, confirmed=true) ok
23152339 MsgNo.11-2)
2316- Mar 24 11:18:14 x3650e crmd: [3181]: info: process_lrm_event: LRM operation prmGuest-a1_migrate_to_0 (call=33, rc=0, cib-update=60, confirmed=true) ok
2340+ Jan 1 00:00:00 node01 crmd[777]: notice: process_lrm_event:
2341+ LRM operation prmRSC_migrate_to_0 (call=215, rc=0,
2342+ cib-update=100, confirmed=true) ok
23172343 MsgNo.12-2)
2318- Mar 24 11:18:15 x3650f crmd: [3356]: info: process_lrm_event: LRM operation prmGuest-a1_migrate_from_0 (call=35, rc=0, cib-update=248, confirmed=true) ok
2344+ Jan 1 00:00:00 node01 crmd[777]: notice: process_lrm_event:
2345+ LRM operation prmRSC_migrate_from_0 (call=215, rc=0,
2346+ cib-update=100, confirmed=true) ok
23192347 MsgNo.17-2)
2320- Jan 7 10:21:41 x3650a crmd: [25493]: info: process_lrm_event: LRM operation prmStonith2-3_start_0 (call=11, rc=0, cib-update=42, confirmed=true) ok
2348+ Jan 1 00:00:00 node01 crmd[777]: notice: process_lrm_event:
2349+ LRM operation prmStonith1_start_0 (call=168, rc=0,
2350+ cib-update=67, confirmed=true) ok
23212351 MsgNo.18-2)
2322- Jan 7 10:22:11 x3650a crmd: [25493]: info: process_lrm_event: LRM operation prmStonith2-3_stop_0 (call=34, rc=0, cib-update=71, confirmed=true) ok
2352+ Jan 1 00:00:00 node01 crmd[777]: notice: process_lrm_event:
2353+ LRM operation prmStonith1_stop_0 (call=168, rc=0,
2354+ cib-update=67, confirmed=true) ok
23232355 '''
23242356 def operation_succeeded(self, outputobj, logelm, lconvfrm):
23252357 completeopDic = {
@@ -2354,23 +2386,41 @@
23542386 monitor (exclude rc=OCF_NOT_RUNNING), promote, demote,
23552387 and STONITH resource's start, stop.
23562388 MsgNo.1-3)
2357- Jan 6 15:22:45 x3650a crmd: [26989]: info: process_lrm_event: LRM operation prmApPostgreSQLDB_start_0 (call=25, rc=1, cib-update=58, confirmed=true) unknown error
2389+ Jan 1 00:00:00 node01 crmd[777]: notice: process_lrm_event:
2390+ LRM operation prmRSC_start_0 (call=254, rc=1,
2391+ cib-update=107, confirmed=true) unknown error
23582392 MsgNo.2-3)
2359- Jan 6 18:11:34 x3650a crmd: [4144]: info: process_lrm_event: LRM operation prmApPostgreSQLDB_stop_0 (call=27, rc=1, cib-update=76, confirmed=true) unknown error
2393+ Jan 1 00:00:00 node01 crmd[777]: notice: process_lrm_event:
2394+ LRM operation prmRSC_stop_0 (call=254, rc=1,
2395+ cib-update=107, confirmed=true) unknown error
23602396 MsgNo.3-1)
2361- Jan 6 19:23:01 x3650a crmd: [19038]: info: process_lrm_event: LRM operation prmExPostgreSQLDB_monitor_10000 (call=16, rc=1, cib-update=72, confirmed=false) unknown error
2397+ Jan 1 00:00:00 node01 crmd[777]: notice: process_lrm_event:
2398+ LRM operation prmRSC_monitor_0 (call=254, rc=1,
2399+ cib-update=107, confirmed=true) unknown error
23622400 MsgNo.4-3)
2363- Jan 6 15:22:45 x3650a crmd: [26989]: info: process_lrm_event: LRM operation prmStateful:1_promote_0 (call=25, rc=1, cib-update=58, confirmed=true) unknown error
2401+ Jan 1 00:00:00 node01 crmd[777]: notice: process_lrm_event:
2402+ LRM operation prmRSC_promote_0 (call=254, rc=1,
2403+ cib-update=107, confirmed=true) unknown error
23642404 MsgNo.5-3)
2365- Jan 6 15:22:45 x3650a crmd: [26989]: info: process_lrm_event: LRM operation prmStateful:1_demote_0 (call=25, rc=1, cib-update=58, confirmed=true) unknown error
2405+ Jan 1 00:00:00 node01 crmd[777]: notice: process_lrm_event:
2406+ LRM operation prmRSC_demote_0 (call=254, rc=1,
2407+ cib-update=107, confirmed=true) unknown error
23662408 MsgNo.11-3)
2367- Mar 24 11:43:41 x3650e crmd: [3181]: info: process_lrm_event: LRM operation prmGuest-a1_migrate_to_0 (call=37, rc=1, cib-update=254, confirmed=true) unknown error
2409+ Jan 1 00:00:00 node01 crmd[777]: notice: process_lrm_event:
2410+ LRM operation prmRSC_migrate_to_0 (call=254, rc=1,
2411+ cib-update=107, confirmed=true) unknown error
23682412 MsgNo.12-3)
2369- Mar 24 12:11:03 x3650f crmd: [3356]: info: process_lrm_event: LRM operation prmGuest-a1_migrate_from_0 (call=45, rc=1, cib-update=292, confirmed=true) unknown error
2413+ Jan 1 00:00:00 node01 crmd[777]: notice: process_lrm_event:
2414+ LRM operation prmRSC_migrate_from_0 (call=254, rc=1,
2415+ cib-update=107, confirmed=true) unknown error
23702416 MsgNo.17-3)
2371- Jan 7 10:54:45 x3650a crmd: [32714]: info: process_lrm_event: LRM operation prmStonith2-3_start_0 (call=11, rc=1, cib-update=56, confirmed=true) unknown error
2417+ Jan 1 00:00:00 node01 crmd[777]: error: process_lrm_event:
2418+ LRM operation prmStonith1_start_0 (call=145, status=4,
2419+ cib-update=127, confirmed=true) Error
23722420 MsgNo.19-1)
2373- Jan 7 13:47:57 x3650a crmd: [19263]: info: process_lrm_event: LRM operation prmStonith2-3_monitor_30000 (call=30, rc=14, cib-update=89, confirmed=false) status: unknown
2421+ Jan 1 00:00:00 node01 crmd[777]: error: process_lrm_event:
2422+ LRM operation prmStonith1_monitor_10000 (call=130, status=4,
2423+ cib-update=79, confirmed=false) Error
23742424 '''
23752425 def operation_failed(self, outputobj, logelm, lconvfrm):
23762426 try:
@@ -2390,19 +2440,26 @@
23902440 Convert log message which means operation for OCF resource timed out.
23912441 This function is common for start, stop, monitor, promote, demote.
23922442 MsgNo.1-4)
2393- Jan 6 17:41:35 x3650a crmd: [1404]: ERROR: process_lrm_event: LRM operation prmApPostgreSQLDB_start_0 (25) Timed Out (timeout=30000ms)
2443+ Jan 1 00:00:00 node01 crmd[777]: error: process_lrm_event:
2444+ LRM operation prmRSC_start_0 (77) Timed Out (timeout=10000ms)
23942445 MsgNo.2-4)
2395- Jan 6 18:19:47 x3650a crmd: [7948]: ERROR: process_lrm_event: LRM operation prmApPostgreSQLDB_stop_0 (27) Timed Out (timeout=30000ms)
2446+ Jan 1 00:00:00 node01 crmd[777]: error: process_lrm_event:
2447+ LRM operation prmRSC_stop_0 (77) Timed Out (timeout=10000ms)
23962448 MsgNo.3-3)
2397- Jan 6 19:55:31 x3650a crmd: [28183]: ERROR: process_lrm_event: LRM operation prmExPostgreSQLDB_monitor_10000 (27) Timed Out (timeout=30000ms)
2449+ Jan 1 00:00:00 node01 crmd[777]: error: process_lrm_event:
2450+ LRM operation prmRSC_monitor_10000 (77) Timed Out (timeout=10000ms)
23982451 MsgNo.4-4)
2399- Jan 6 17:41:35 x3650a crmd: [1404]: ERROR: process_lrm_event: LRM operation prmStateful:1_promote_0 (25) Timed Out (timeout=30000ms)
2452+ Jan 1 00:00:00 node01 crmd[777]: error: process_lrm_event:
2453+ LRM operation prmRSC_promote_0 (77) Timed Out (timeout=10000ms)
24002454 MsgNo.5-4)
2401- Jan 6 17:41:35 x3650a crmd: [1404]: ERROR: process_lrm_event: LRM operation prmStateful:1_demote_0 (25) Timed Out (timeout=30000ms)
2455+ Jan 1 00:00:00 node01 crmd[777]: error: process_lrm_event:
2456+ LRM operation prmRSC_demote_0 (77) Timed Out (timeout=10000ms)
24022457 MsgNo.11-4)
2403- Mar 24 11:59:21 x3650e crmd: [3181]: ERROR: process_lrm_event: LRM operation prmGuest-a1_migrate_to_0 (42) Timed Out (timeout=120000ms)
2458+ Jan 1 00:00:00 node01 crmd[777]: error: process_lrm_event:
2459+ LRM operation prmRSC_migrate_to_0 (77) Timed Out (timeout=10000ms)
24042460 MsgNo.12-4)
2405- Mar 24 12:16:15 x3650f crmd: [3356]: ERROR: process_lrm_event: LRM operation prmGuest-a1_migrate_from_0 (48) Timed Out (timeout=120000ms)
2461+ Jan 1 00:00:00 node01 crmd[777]: error: process_lrm_event:
2462+ LRM operation prmRSC_migrate_from_0 (77) Timed Out (timeout=10000ms)
24062463 '''
24072464 def operation_timedout_ocf(self, outputobj, logelm, lconvfrm):
24082465 try:
@@ -2427,7 +2484,9 @@
24272484 (rc=OCF_NOT_RUNNING).
24282485
24292486 MsgNo.3-2)
2430- Jan 6 19:45:58 x3650a crmd: [23987]: info: process_lrm_event: LRM operation prmExPostgreSQLDB_monitor_10000 (call=16, rc=7, cib-update=60, confirmed=false) not running
2487+ Jan 1 00:00:00 node01 crmd[777]: notice: process_lrm_event:
2488+ LRM operation prmRSC_monitor_10000 (call=270, rc=7,
2489+ cib-update=118, confirmed=false) not running
24312490 '''
24322491 def detect_rsc_failure(self, outputobj, logelm, lconvfrm):
24332492 try:
@@ -2450,9 +2509,11 @@
24502509 Convert log message which means Node status updated.
24512510
24522511 MsgNo.6-1)
2453- Apr 3 14:25:20 pm01 crmd[15994]: info: peer_update_callback: pm02 is now lost (was member)
2512+ Jan 1 00:00:00 node01 crmd[777]: info: peer_update_callback:
2513+ node02 is now lost (was member)
24542514 MsgNo.6-2)
2455- Apr 3 14:26:44 pm01 crmd[15994]: info: peer_update_callback: pm02 is now member (was lost)
2515+ Jan 1 00:00:00 node01 crmd[777]: info: peer_update_callback:
2516+ node02 is now member
24562517 '''
24572518 def node_status_updated(self, outputobj, logelm, lconvfrm):
24582519 try:
@@ -2478,7 +2539,8 @@
24782539 So it outputs nothing.
24792540
24802541 MsgNo. 6-3)
2481- Jun 14 15:04:56 x3650a pengine: [21571]: info: determine_online_status: Node x3650a is online
2542+ Jan 1 00:00:00 node01 crmd[777]: info:
2543+ determine_online_status: Node node01 is online
24822544 '''
24832545 def node_status_determined(self, outputobj, logelm, lconvfrm):
24842546 try:
@@ -2500,7 +2562,8 @@
25002562 Convert log message which means Interconnect-LAN status changed to "dead"
25012563
25022564 MsgNo.7-1)
2503- Jan 19 10:56:53 x3650a corosync[22855]: [TOTEM ] Marking seqid 1287 ringid 0 interface 192.168.101.1 FAULTY - adminisrtative intervention required.
2565+ Jan 1 00:00:00 node01 corosync[777]: [TOTEM ] Marking seqid 53645
2566+ ringid 0 interface 192.168.101.1 FAULTY
25042567 '''
25052568 def detect_iconnlan_dead(self, outputobj, logelm, lconvfrm):
25062569 try:
@@ -2522,7 +2585,9 @@
25222585 See also the comment on detect_iconnlan_dead().
25232586
25242587 MsgNo.8-1)
2525- Jul 21 19:00:20 x3650a ping[2206]: WARNING: 192.168.201.254 is inactive: PING 192.168.201.254 (192.168.201.254) 56(84) bytes of data. [snip]
2588+ Jan 1 00:00:00 node01 ping(prmPing)[777]: WARNING:
2589+ 192.168.201.254 is inactive: PING 192.168.201.254 (192.168.201.254)
2590+ 56(84) bytes of data.
25262591 '''
25272592 def detect_node_dead(self, outputobj, logelm, lconvfrm):
25282593 try:
@@ -2545,7 +2610,9 @@
25452610 Convert log message which means disk error.
25462611
25472612 MsgNo.9-1)
2548- Jun 24 20:19:53 x3650a diskd: [22126]: WARN: check_status: disk status is changed, attr_name=diskcheck_status_internal, target=/tmp, new_status=ERROR
2613+ Jan 1 00:00:00 node01 diskd[777]: warning: check_status:
2614+ disk status is changed, attr_name=diskcheck_status,
2615+ target=/dev/mapper/lun1, new_status=ERROR
25492616 '''
25502617 def detect_disk_error(self, outputobj, logelm, lconvfrm):
25512618 try:
@@ -2569,7 +2636,8 @@
25692636 Convert log message which means respawn process start.
25702637
25712638 MsgNo.10-1)
2572- Jul 27 17:29:52 x3650a pacemakerd: [2781]: info: start_child: Forked child 5800 for process cib
2639+ Jan 1 00:00:00 node01 pacemakerd[777]: info: start_child:
2640+ Forked child 888 for process cib
25732641 '''
25742642 def respawn_start(self, outputobj, logelm, lconvfrm):
25752643 try:
@@ -2589,7 +2657,8 @@
25892657 Convert log message which means respawn process exited with error.
25902658
25912659 MsgNo.10-2)
2592- Jul 20 15:47:47 x3650a pacemakerd: [2393]: ERROR: pcmk_child_exit: Child process attrd exited (pid=25803, rc=2)
2660+ Jan 1 00:00:00 node01 pacemakerd[777]: error: pcmk_child_exit:
2661+ Child process cib exited (pid=888, rc=100)
25932662 '''
25942663 def respawn_exited_abnormally(self, outputobj, logelm, lconvfrm):
25952664 try:
@@ -2610,7 +2679,8 @@
26102679 Convert log message which means respawn process killed by signal.
26112680
26122681 MsgNo.10-3)
2613- Jul 20 15:46:43 x3650a pacemakerd: [27029]: notice: pcmk_child_exit: Child process crmd terminated with signal 9 (pid=22591, rc=0)
2682+ Jan 1 00:00:00 node01 pacemakerd[777]: notice: pcmk_child_exit:
2683+ Child process cib terminated with signal 9 (pid=888, core=0)
26142684 '''
26152685 def respawn_killed(self, outputobj, logelm, lconvfrm):
26162686 try:
@@ -2631,7 +2701,8 @@
26312701 Convert log message which means respawn process exited normally in shutdown process.
26322702
26332703 MsgNo.10-6)
2634- Jul 27 17:30:34 x3650a pacemakerd: [2393]: notice: pcmk_child_exit: Child process attrd exited (pid=25803, rc=0)
2704+ Jan 1 00:00:00 node01 pacemakerd[777]: info: pcmk_child_exit:
2705+ Child process cib exited (pid=888, rc=0)
26352706 '''
26362707 def respawn_exited_normally(self, outputobj, logelm, lconvfrm):
26372708 try:
@@ -2658,7 +2729,8 @@
26582729 Convert log message which means do respawning too frequently in a short term.
26592730
26602731 MsgNo.10-7)
2661- Jul 27 17:23:40 x3650a pacemakerd: [13090]: ERROR: pcmk_child_exit: Child respawn count exceeded by attrd
2732+ Jan 1 00:00:00 node01 pacemakerd[777]: error:
2733+ pcmk_process_exit: Child respawn count exceeded by cib
26622734 '''
26632735 def respawn_too_fast(self, outputobj, logelm, lconvfrm):
26642736 try:
@@ -2698,7 +2770,9 @@
26982770 If not or it is already in F/O process, it outputs nothing.
26992771
27002772 MsgNo.F0-1, F9-1, F10-1)
2701- Jan 5 15:19:20 x3650a crmd: [17659]: info: do_state_transition: State transition S_IDLE -> S_POLICY_ENGINE [ input=I_PE_CALC cause=C_FSA_INTERNAL origin=abort_transition_graph ]
2773+ Jan 1 00:00:00 node01 crmd[777]: notice: do_state_transition:
2774+ State transition S_IDLE -> S_POLICY_ENGINE
2775+ [ input=I_PE_CALC cause=C_TIMER_POPPED origin=crm_timer_popped ]
27022776 '''
27032777 def detect_pe_calc(self, outputobj, logelm, lconvfrm):
27042778 cstat.IN_CALC = True
@@ -2719,7 +2793,9 @@
27192793 This function is called when cluster status became "S_IDLE" or "S_STOPPING".
27202794
27212795 MsgNo.F0-2, F12-1, F12-2)
2722- Jan 5 14:50:07 x3650a crmd: [13198]: info: do_state_transition: State transition S_TRANSITION_ENGINE -> S_IDLE [ input=I_TE_SUCCESS cause=C_FSA_INTERNAL origin=notify_crmd ]
2796+ Jan 1 00:00:00 node01 crmd[777]: notice: do_state_transition:
2797+ State transition S_TRANSITION_ENGINE -> S_IDLE
2798+ [ input=I_TE_SUCCESS cause=C_FSA_INTERNAL origin=notify_crmd ]
27232799 '''
27242800 def detect_fo_complete(self, outputobj, logelm, lconvfrm):
27252801
@@ -2784,7 +2860,8 @@
27842860 So it outputs nothing.
27852861
27862862 MsgNo. F11-1)
2787- Jan 5 15:12:25 x3650a pengine: [16657]: notice: LogActions: Start prmExPostgreSQLDB (x3650a)
2863+ Jan 1 00:00:00 node01 pengine[777]: notice: LogActions:
2864+ Start prmRSC (node01)
27882865 '''
27892866 def add_rsc_start(self, outputobj, logelm, lconvfrm):
27902867 try:
@@ -2811,7 +2888,8 @@
28112888 This is to get resource status when F/O finished.
28122889
28132890 MsgNo. F11-2)
2814- Jan 5 15:19:23 x3650a pengine: [17658]: notice: LogActions: Stop prmExPostgreSQLDB (x3650a)
2891+ Jan 1 00:00:00 node01 pengine[777]: notice: LogActions:
2892+ Stop prmRSC (node01)
28152893 '''
28162894 def add_rsc_stop(self, outputobj, logelm, lconvfrm):
28172895 try:
@@ -2843,13 +2921,23 @@
28432921 So it outputs nothing.
28442922
28452923 MsgNo.F11-3)
2846- Jan 5 15:36:42 x3650a pengine: [27135]: notice: LogActions: Leave prmFsPostgreSQLDB1 (Started x3650a)
2924+ Jan 1 00:00:00 node01 pengine[777]: info: LogActions:
2925+ Leave prmRSC (Started node01)
28472926 MsgNo.F11-5)
2848- Jan 5 15:36:42 x3650a pengine: [27135]: notice: LogActions: Leave prmFsPostgreSQLDB1 (Started unmanaged)
2927+ Jan 1 00:00:00 node01 pengine[777]: info: LogActions:
2928+ Leave prmRSC (Started unmanaged)
28492929 MsgNo.F11-8)
2850- Jan 5 14:50:05 x3650a pengine: [13197]: notice: LogActions: Restart prmIpPostgreSQLDB (Started x3650b)
2930+ Jan 1 00:00:00 node01 pengine[777]: notice: LogActions:
2931+ Restart prmRSC (Started node01)
28512932 MsgNo.F11-9)
2852- Jan 5 14:50:41 x3650a pengine: [13197]: notice: LogActions: Leave prmPingd:0 (Stopped)
2933+ Jan 1 00:00:00 node01 pengine[777]: info: LogActions:
2934+ Leave prmRSC (Stopped)
2935+ MsgNo.F11-11)
2936+ Jan 1 00:00:00 node01 pengine[777]: notice: LogActions:
2937+ Recover prmRSC (Started node01)
2938+ MsgNo.F11-12)
2939+ Jan 1 00:00:00 node01 pengine[777]: notice: LogActions:
2940+ Reload prmRSC (Started node01)
28532941 '''
28542942 def add_no_action(self, outputobj, logelm, lconvfrm):
28552943 try:
@@ -2885,7 +2973,14 @@
28852973 This is to get resource status when F/O started.
28862974
28872975 MsgNo. F11-6)
2888- Jan 5 15:12:27 x3650a pengine: [16657]: notice: LogActions: Move prmExPostgreSQLDB (Started x3650a -> x3650b)
2976+ Jan 1 00:00:00 node01 pengine[777]: notice: LogActions:
2977+ Move prmRSC (Started node01 -> node02)
2978+ MsgNo. F11-13)
2979+ Jan 1 00:00:00 node01 pengine[777]: notice: LogActions:
2980+ Recover prmRSC (Started node01 -> node02)
2981+ MsgNo. F11-14)
2982+ Jan 1 00:00:00 node01 pengine[777]: notice: LogActions:
2983+ Migrate prmRSC (Started node01 -> node02)
28892984 '''
28902985 def add_rsc_move(self, outputobj, logelm, lconvfrm):
28912986 try:
@@ -2920,8 +3015,8 @@
29203015 So it outputs nothing.
29213016
29223017 MsgNo. F11-10)
2923- May 27 11:23:50 x3650a crmd: [8108]: info: te_rsc_command: Initiating action 25: start prmExPostgreSQLDB_start_0 on x3650a (local)
2924- May 27 11:23:50 x3650a crmd: [8108]: info: te_rsc_command: Initiating action 25: start prmExPostgreSQLDB_start_0 on x3650b
3018+ Jan 1 00:00:00 node01 crmd[777]: notice: te_rsc_command:
3019+ Initiating action 26: start prmRSC_start_0 on node01 (local)
29253020 '''
29263021 def rsc_init_action(self, outputobj, logelm, lconvfrm):
29273022 if cstat.IN_FO_PROCESS == False:
@@ -2949,7 +3044,8 @@
29493044 Convert log message which means DC election is complete.
29503045
29513046 MsgNo.13-2)
2952- Jan 6 14:16:18 x3650a crmd: [9874]: info: update_dc: Set DC to x3650a (3.0.1)
3047+ Jan 1 00:00:00 node01 crmd[777]: info: update_dc:
3048+ Set DC to node01 (3.0.7)
29533049 '''
29543050 def dc_election_complete(self, outputobj, logelm, lconvfrm):
29553051 try:
@@ -2967,7 +3063,8 @@
29673063 Convert log message which means unset DC node.
29683064
29693065 MsgNo.13-5)
2970- Jan 12 11:22:18 x3650a crmd: [5796]: info: update_dc: Unset DC x3650a
3066+ Jan 1 00:00:00 node01 crmd[777]: info: update_dc:
3067+ Unset DC. Was node02
29713068 '''
29723069 def detect_unset_dc(self, outputobj, logelm, lconvfrm):
29733070 try:
@@ -2990,7 +3087,8 @@
29903087 node list, detect the message with peculiar function.
29913088
29923089 MsgNo.14-2)
2993- Jan 18 10:49:13 x3650a corosync[3580]: [MAIN ] Corosync Cluster Engine exiting normally
3090+ Jan 1 00:00:00 node01 corosync[777]: [MAIN ]
3091+ Corosync Cluster Engine exiting normally
29943092 '''
29953093 def detect_cs_shutdown(self, outputobj, logelm, lconvfrm):
29963094 outputobj.output_log(lconvfrm.loglevel, lconvfrm.rulename)
@@ -3002,7 +3100,8 @@
30023100 in the cluster send shutdown request.
30033101
30043102 MsgNo.14-5)
3005- Jan 18 10:35:08 x3650a crmd: [10975]: info: handle_shutdown_request: Creating shutdown request for x3650b (state=S_IDLE)
3103+ Jan 1 00:00:00 node01 crmd[777]: info: handle_shutdown_request:
3104+ Creating shutdown request for node02 (state=S_IDLE)
30063105 '''
30073106 def detect_shutdown_request(self, outputobj, logelm, lconvfrm):
30083107 try:
@@ -3023,7 +3122,9 @@
30233122 Convert log message which means fence operation started.
30243123
30253124 MsgNo.21-1)
3026- Jul 22 11:24:58 x3650a crmd: [1997]: info: te_fence_node: Executing reboot fencing operation (28) on x3650b (timeout=40000)
3125+ Jan 1 00:00:00 node01 crmd[777]: notice: te_fence_node:
3126+ Executing reboot fencing operation (79) on node02
3127+ (timeout=60000)
30273128 '''
30283129 def fence_op_started(self, outputobj, logelm, lconvfrm):
30293130 try:
@@ -3051,8 +3152,9 @@
30513152 (ref=00000000-0000-0000-0000-000000000000) by client stonith_admin.888
30523153 MsgNo.21-3)
30533154 Jan 1 00:00:00 node01 crmd[777]: notice: tengine_stonith_notify:
3054- Peer node02 was not terminated (reboot) by node03 for node01: Timer expired
3055- (ref=00000000-0000-0000-0000-000000000000) by client crmd.777
3155+ Peer node02 was not terminated (reboot) by node03 for node01:
3156+ Timer expired (ref=00000000-0000-0000-0000-000000000000)
3157+ by client crmd.777
30563158 '''
30573159 def fence_op_ended(self, outputobj, logelm, lconvfrm):
30583160 try:
@@ -3101,8 +3203,9 @@
31013203 Convert log message which means executing stonith device started.
31023204
31033205 MsgNo.21-5)
3104- Jan 1 00:00:00 node01 stonith-ng[777]: info: call_remote_stonith:
3105- Requesting that node03 perform op reboot node02 with prmStonith1 for crmd.888 (72s)
3206+ Jan 1 00:00:00 node01 stonith-ng[777]: info:
3207+ call_remote_stonith: Requesting that node03 perform op
3208+ reboot node02 with prmStonith1 for crmd.888 (72s)
31063209 '''
31073210 def exec_st_device_started(self, outputobj, logelm, lconvfrm):
31083211 try:
@@ -3128,11 +3231,13 @@
31283231 Convert log message which means executing stonith device ended.
31293232
31303233 MsgNo.21-6)
3131- Jan 1 00:00:00 node01 stonith-ng[777]: notice: process_remote_stonith_exec:
3132- Call to prmStonith1 for node02 on behalf of crmd.888@node01: OK (0)
3234+ Jan 1 00:00:00 node01 stonith-ng[777]: notice:
3235+ process_remote_stonith_exec: Call to prmStonith1 for node02
3236+ on behalf of crmd.888@node01: OK (0)
31333237 MsgNo.21-7)
3134- Jan 1 00:00:00 node01 stonith-ng[777]: notice: process_remote_stonith_exec:
3135- Call to prmStonith1 for node02 on behalf of crmd.888@node01: Generic Pacemaker error (-201)
3238+ Jan 1 00:00:00 node01 stonith-ng[777]: notice:
3239+ process_remote_stonith_exec: Call to prmStonith1 for node02
3240+ on behalf of crmd.888@node01: Generic Pacemaker error (-201)
31363241 '''
31373242 def exec_st_device_ended(self, outputobj, logelm, lconvfrm):
31383243 try:
@@ -3160,7 +3265,8 @@
31603265 Convert log message which means attribute value on own node updated.
31613266
31623267 MsgNo.22-1)
3163- Jun 24 09:49:58 x3650a attrd: [16121]: notice: attrd_perform_update: Sent update 45: diskcheck_status_internal=ERROR
3268+ Jan 1 00:00:00 node01 attrd[777]: notice: attrd_perform_update:
3269+ Sent update 7: diskcheck_status=normal
31643270 '''
31653271 def detect_attr_updated(self, outputobj, logelm, lconvfrm):
31663272 try:
@@ -3185,7 +3291,9 @@
31853291 Convert log message which means attribute value on own node deleted.
31863292
31873293 MsgNo.22-2)
3188- Jun 24 19:43:39 x3650a attrd: [10425]: notice: attrd_perform_update: Sent delete 34: node=x3650a, attr=diskcheck_status, id=<n/a>, set=(null), section=status
3294+ Jan 1 00:00:00 node01 attrd[777]: notice: attrd_perform_update:
3295+ Sent delete 51: node=2657462464, attr=diskcheck_status,
3296+ id=<n/a>, set=(null), section=status
31893297 '''
31903298 def detect_attr_deleted(self, outputobj, logelm, lconvfrm):
31913299 try:
@@ -3203,10 +3311,11 @@
32033311 Detect cib updated or added.
32043312
32053313 MsgNo. 22-3)
3206- Jul 8 11:30:24 x3650a crmd: [4118]: info: abort_transition_graph: \
3207- te_update_diff:150 - Triggered transition abort \
3208- (complete=1, tag=nvpair, id=status-x3650b-default_ping_set, name=default_ping_set, value=100, magic=NA, cib=0.10.47) \
3209- : Transient attribute: update
3314+ Jan 1 00:00:00 node01 crmd[777]: info: abort_transition_graph:
3315+ te_update_diff:172 - Triggered transition abort (complete=0,
3316+ node=node01, tag=nvpair, id=status-2657462464-diskcheck_status,
3317+ name=diskcheck_status, value=ERROR, magic=NA, cib=0.568.22) :
3318+ Transient attribute: update
32103319 '''
32113320 def detect_cib_updated(self, outputobj, logelm, lconvfrm):
32123321 try:
@@ -3234,7 +3343,8 @@
32343343 Convert log message which means Corosync service is starting.
32353344
32363345 MsgNo.23-1)
3237- Jan 15 19:30:19 x3650a corosync[2657]: [MAIN ] Corosync Cluster Engine ('1.3.2'): started and ready to provide service.
3346+ Jan 1 00:00:00 node01 corosync[777]: [MAIN ] Corosync Cluster
3347+ Engine ('2.3.0'): started and ready to provide service.
32383348 '''
32393349 def detect_cs_start(self, outputobj, logelm, lconvfrm):
32403350 try:
@@ -3255,8 +3365,10 @@
32553365 Convert log message which means Pacemaker is starting.
32563366
32573367 MsgNo.23-4)
3258- Feb 1 13:09:28 x3650a pacemakerd: [3644]: notice: main: Starting Pacemaker 1.1.9-1.el6 (Build: f2a0c41): \
3259- generated-manpages agent-manpages ncurses libqb-logging libqb-ipc lha-fencing nagios corosync-native
3368+ Jan 1 00:00:00 node01 pacemakerd[777]: notice: main:
3369+ Starting Pacemaker 1.1.10 (Build: 1af3c9b): generated-manpages
3370+ agent-manpages ascii-docs publican-docs ncurses libqb-logging
3371+ libqb-ipc lha-fencing nagios corosync-native
32603372 '''
32613373 def detect_pcmk_start(self, outputobj, logelm, lconvfrm):
32623374 try:
Show on old repository browser