メインリポジトリ
| Revision | 63103ea31777d24593915505cf9d245c4fc4abb3 (tree) |
|---|---|
| Time | 2010-07-16 19:34:06 |
| Author | |
| Committer | Kei Funagayama |
Merge branch 'dev'
Conflicts:
doc/redhat.spec
| @@ -0,0 +1,7 @@ | ||
| 1 | +# Lines that start with '#' are comments. | |
| 2 | +*.pyc | |
| 3 | +*~ | |
| 4 | +*.bak | |
| 5 | +*.old | |
| 6 | +*.orig | |
| 7 | +*.swp |
| @@ -5,3 +5,4 @@ The pysilhouette project was initiated by: | ||
| 5 | 5 | Kei Funagayama <kei@karesansui-project.info> |
| 6 | 6 | Junichi Shinohara <junichi@karesansui-project.info> |
| 7 | 7 | Kazuya Hayashi <kazuya@karesansui-project.info> |
| 8 | + |
| @@ -1,4 +1,12 @@ | ||
| 1 | +Fri Jul 09 18:00:00 +0900 2010 Kei Funagayama <kei.topaz@gmail.com> | |
| 2 | + Version 0.7.0 release | |
| 3 | +Tue Jun 29 18:00:00 +0900 2010 Kei Funagayama <kei.topaz@gmail.com> | |
| 4 | + Version 0.7 beta3 release | |
| 5 | +Fri Apr 09 18:00:00 +0900 2010 Kei Funagayama <kei.topaz@gmail.com> | |
| 6 | + Version 0.7 beta2 release | |
| 7 | +Fri Apr 09 12:00:00 +0900 2010 Kei Funagayama <kei.topaz@gmail.com> | |
| 8 | + Version 0.7 beta1 release | |
| 9 | +Wed Jan 03 12:00:00 +0900 2010 Kei Funagayama <kei@karesansui-project.info> | |
| 10 | + Version 0.7 alpha1 release | |
| 1 | 11 | Thu Jun 28 12:00:00 +0900 2009 Kei Funagayama <kei@karesansui-project.info> |
| 2 | - | |
| 3 | - Version 0.6 Release | |
| 4 | - | |
| 12 | + Version 0.6 release |
| @@ -1,7 +1,7 @@ | ||
| 1 | 1 | Installing Pysilhouette |
| 2 | 2 | ========================== |
| 3 | 3 | |
| 4 | -Copyright (C) 2009 HDE, Inc. | |
| 4 | +Copyright (C) 2009-2010 HDE, Inc. | |
| 5 | 5 | |
| 6 | 6 | Redistributing, copying, modifying of this file is granted with no restriction. |
| 7 | 7 |
| @@ -57,8 +57,8 @@ whitelist.conf is placed in the directory below by default: | ||
| 57 | 57 | |
| 58 | 58 | Initializing Database |
| 59 | 59 | ================================================================================ |
| 60 | -Use cleanupdb.py to initialize the database. | |
| 61 | - # python tool/cleanupdb.py --config /etc/opt/pysilhouette/silhouette.conf | |
| 60 | +Use psil-cleandb to initialize the database. | |
| 61 | + # python tool/psil-cleandb --config /etc/opt/pysilhouette/silhouette.conf | |
| 62 | 62 | |
| 63 | 63 | Start Up Command |
| 64 | 64 | ================================================================================ |
| @@ -69,9 +69,9 @@ pysilhouette looks for configuration file in the default location. | ||
| 69 | 69 | |
| 70 | 70 | How to Register Jobs |
| 71 | 71 | ================================================================================ |
| 72 | -One can use setjob.py to register jobs from command line. | |
| 73 | - # python tool/setjob.py --help | |
| 74 | - usage: setjob.py [options] | |
| 72 | +One can use psil-set to register jobs from command line. | |
| 73 | + # python tool/psil-set --help | |
| 74 | + usage: psil-set [options] | |
| 75 | 75 | |
| 76 | 76 | options: |
| 77 | 77 | --version show program's version number and exit |
| @@ -80,13 +80,14 @@ One can use setjob.py to register jobs from command line. | ||
| 80 | 80 | configuration file |
| 81 | 81 | -a ACTION, --action=ACTION |
| 82 | 82 | action command |
| 83 | + -t TYPE, --type=TYPE Run type. "serial" or "parallel" | |
| 84 | + -m NAME, --name=NAME action name | |
| 83 | 85 | -r ROLLBACK, --rollback=ROLLBACK |
| 84 | 86 | rollback command |
| 85 | 87 | -f FINISH, --finish=FINISH |
| 86 | 88 | finish command |
| 87 | 89 | -n NUMBER, --number=NUMBER |
| 88 | - number of repeat job | |
| 89 | - | |
| 90 | + Test: Number of repeat job | |
| 90 | 91 | ex) |
| 91 | - # python tool/setjob.py --config /etc/opt/pysilhouette/silhouette.conf --action "/bin/echo 'action command'" | |
| 92 | + # python tool/psil-set --config /etc/opt/pysilhouette/silhouette.conf --action "/bin/echo 'action command'" --name=hoge --type=serial | |
| 92 | 93 |
| @@ -1,7 +1,7 @@ | ||
| 1 | 1 | Pysilhouetteのインストール |
| 2 | 2 | ========================== |
| 3 | 3 | |
| 4 | -Copyright (C) 2009 HDE, Inc. | |
| 4 | +Copyright (C) 2009-2010 HDE, Inc. | |
| 5 | 5 | |
| 6 | 6 | このファイルは、無制限にコピーし再配布が可能です。また、配布して変更も可能です。 |
| 7 | 7 |
| @@ -64,8 +64,8 @@ pysilhouetteで使用するデータベースの接続先を設定します。 | ||
| 64 | 64 | |
| 65 | 65 | データベースの初期化 |
| 66 | 66 | ================================================================================ |
| 67 | -cleanupdb.pyを使用して、データベースを初期化します。 | |
| 68 | - # python tool/cleanupdb.py --config /etc/opt/pysilhouette/silhouette.conf | |
| 67 | +psil-cleandbを使用して、データベースを初期化します。 | |
| 68 | + # python tool/psil-cleandb --config /etc/opt/pysilhouette/silhouette.conf | |
| 69 | 69 | |
| 70 | 70 | |
| 71 | 71 | pysilhouetteの起動 |
| @@ -79,9 +79,9 @@ pysilhouetteの起動 | ||
| 79 | 79 | |
| 80 | 80 | ジョブの登録 |
| 81 | 81 | ================================================================================ |
| 82 | -setjob.pyを使用して、コマンドラインからジョブを登録することができます。 | |
| 83 | - # python tool/setjob.py --help | |
| 84 | - usage: setjob.py [options] | |
| 82 | +psil-setを使用して、コマンドラインからジョブを登録することができます。 | |
| 83 | + # python tool/psil-set --help | |
| 84 | + usage: psil-set [options] | |
| 85 | 85 | |
| 86 | 86 | options: |
| 87 | 87 | --version show program's version number and exit |
| @@ -90,12 +90,13 @@ setjob.pyを使用して、コマンドラインからジョブを登録する | ||
| 90 | 90 | configuration file |
| 91 | 91 | -a ACTION, --action=ACTION |
| 92 | 92 | action command |
| 93 | + -t TYPE, --type=TYPE Run type. "serial" or "parallel" | |
| 94 | + -m NAME, --name=NAME action name | |
| 93 | 95 | -r ROLLBACK, --rollback=ROLLBACK |
| 94 | 96 | rollback command |
| 95 | 97 | -f FINISH, --finish=FINISH |
| 96 | 98 | finish command |
| 97 | 99 | -n NUMBER, --number=NUMBER |
| 98 | - number of repeat job | |
| 99 | - | |
| 100 | - 例) | |
| 101 | - # python tool/setjob.py --config /etc/opt/pysilhouette/silhouette.conf --action "/bin/echo 'action command'" | |
| 100 | + Test: Number of repeat job | |
| 101 | + ex) | |
| 102 | + # python tool/psil-set --config /etc/opt/pysilhouette/silhouette.conf --action "/bin/echo 'action command'" --name=hoge --type=serial |
| @@ -1,4 +1,4 @@ | ||
| 1 | -Copyright (c) 2009 HDE, Inc. | |
| 1 | +Copyright (c) 2009-2010 HDE, Inc. | |
| 2 | 2 | |
| 3 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy |
| 4 | 4 | of this software and associated documentation files (the "Software"), to deal |
| @@ -12,7 +12,7 @@ See 'INSTALL'. | ||
| 12 | 12 | License/Copying |
| 13 | 13 | ================================================================================ |
| 14 | 14 | |
| 15 | -Copyright (c) 2009 HDE, Inc. | |
| 15 | +Copyright (c) 2009-2010 HDE, Inc. | |
| 16 | 16 | |
| 17 | 17 | Permission is hereby granted, free of charge, to any person obtaining a copy |
| 18 | 18 | of this software and associated documentation files (the "Software"), to deal |
| @@ -44,57 +44,64 @@ webpy | ||
| 44 | 44 | Directory Structure |
| 45 | 45 | ================================================================================ |
| 46 | 46 | . |
| 47 | -|-- AUTHORS | |
| 47 | +|-- AUTHORS | |
| 48 | 48 | |-- ChangeLog |
| 49 | 49 | |-- INSTALL |
| 50 | 50 | |-- INSTALL.ja |
| 51 | 51 | |-- LICENSE |
| 52 | -|-- MANIFEST.in x # for distutils packaging | |
| 52 | +|-- MANIFEST.in # for distutils packaging | |
| 53 | 53 | |-- README |
| 54 | 54 | |-- README.ja # Japanese version of this file. |
| 55 | +|-- debian | |
| 56 | +| |-- README.Debian | |
| 57 | +| |-- changelog | |
| 58 | +| |-- compat | |
| 59 | +| |-- control | |
| 60 | +| |-- copyright | |
| 61 | +| |-- dirs | |
| 62 | +| |-- docs | |
| 63 | +| |-- performerd.init | |
| 64 | +| |-- postinst | |
| 65 | +| |-- postrm | |
| 66 | +| |-- preinst | |
| 67 | +| |-- prerm | |
| 68 | +| |-- pycompat | |
| 69 | +| |-- rules | |
| 70 | +| |-- schedulerd.init | |
| 71 | +| |-- silhouetted.default | |
| 72 | +| `-- silhouetted.init | |
| 55 | 73 | |-- doc |
| 56 | -| |-- epydoc.cfg # Configuration file for epydoc. | |
| 74 | +| |-- epydoc.cfg # Configuration file for epydoc | |
| 57 | 75 | | |-- log.conf.example # Example config file for logging function. |
| 58 | 76 | | |-- rc.d |
| 59 | 77 | | | `-- init.d |
| 78 | +| | |-- asynperformerd # init script for the asynperformer daemon | |
| 79 | +| | |-- asynschedulerd # init script for the asynschedulerd daemon | |
| 60 | 80 | | | |-- performerd # init script for the performer daemon |
| 61 | 81 | | | |-- schedulerd # init script for the scheduler daemon |
| 62 | 82 | | | `-- silhouetted # init script for the watch daemon |
| 63 | 83 | | |-- redhat.spec # Spec file for RPM building. |
| 64 | 84 | | |-- silhouette.conf.example # Example config file for Pysilhouette |
| 65 | -| |-- sysconfig # System config file. | |
| 85 | +| |-- sysconfig | |
| 66 | 86 | | | `-- silhouetted |
| 67 | -| |-- whitelist.conf.example # Example config file for whitelist function | |
| 68 | -| `-- wwwpysilhouette # Web interface | |
| 69 | -| |-- config.py | |
| 70 | -| |-- deletejg.py | |
| 71 | -| |-- form.py | |
| 72 | -| |-- getjg.py | |
| 73 | -| |-- index.py | |
| 74 | -| |-- job_delete.py | |
| 75 | -| |-- job_get.py | |
| 76 | -| |-- job_post.py | |
| 77 | -| |-- job_put.py | |
| 78 | -| |-- postjg.py | |
| 79 | -| |-- putjg.py | |
| 80 | -| |-- statjg.py | |
| 81 | -| |-- style.css | |
| 82 | -| |-- util.py | |
| 83 | -| `-- validate.js | |
| 87 | +| `-- whitelist.conf.example # Example config file for whitelist function | |
| 84 | 88 | |-- example # Sample programs using pysilhouette. |
| 85 | 89 | | |-- dummy.py |
| 86 | 90 | | |-- insert_dummy.py |
| 87 | 91 | | |-- sendmail.py |
| 88 | 92 | | |-- test_failure.py |
| 89 | 93 | | `-- test_success.py |
| 90 | -|-- pysilhouette # Main program. | |
| 94 | +|-- pysilhouette | |
| 91 | 95 | | |-- __init__.py |
| 96 | +| |-- asynperformer.py | |
| 97 | +| |-- asynscheduler.py | |
| 92 | 98 | | |-- command.py |
| 93 | 99 | | |-- daemon.py # Daemonizing function. |
| 94 | 100 | | |-- db # Database related files. |
| 95 | 101 | | | |-- __init__.py |
| 96 | 102 | | | |-- access.py # Database operation. |
| 97 | -| | `-- model.py # Database table model. | |
| 103 | +| | |-- model.py # Database table model. | |
| 104 | +| |-- er.py | |
| 98 | 105 | | |-- log.py |
| 99 | 106 | | |-- performer.py # Performer daemon (executes job commands) |
| 100 | 107 | | |-- prep.py # Initialize functions. |
| @@ -112,9 +119,10 @@ Directory Structure | ||
| 112 | 119 | |-- setup.cfg # Configuration for distutils packaging. |
| 113 | 120 | |-- setup.py # Main command for distutils packaging. |
| 114 | 121 | `-- tool # Tools for development/operation. |
| 115 | - |-- cleanupdb.py # Initializes database. | |
| 116 | - |-- epydoc.sh # Generates javadoc-like documents. | |
| 117 | - `-- setdummy.py # Sets some dummy job commands. | |
| 122 | + |-- epydoc.sh | |
| 123 | + |-- psil-cleandb | |
| 124 | + |-- psil-set | |
| 125 | + `-- sqlite2other.py | |
| 118 | 126 | |
| 119 | 127 | Acknowledgment |
| 120 | 128 | ================================================================================ |
| @@ -59,18 +59,38 @@ PostgreSQLを使用する場合 | ||
| 59 | 59 | |-- ChangeLog # チェンジログ |
| 60 | 60 | |-- INSTALL # 英語版インストールマニュアル |
| 61 | 61 | |-- INSTALL.ja # 日本語版インストールマニュアル |
| 62 | -|-- LICENSE # ライセンス | |
| 63 | -|-- MANIFEST.in x # distutilsを利用したパッケージングをするのに使用する。 | |
| 62 | +|-- LICENSE # ライセンス | |
| 63 | +|-- MANIFEST.in # distutilsを利用したパッケージングをするのに使用する。 | |
| 64 | 64 | |-- README # 英語語版インストールマニュアル |
| 65 | -|-- README.ja # 日本語版インストールマニュアル | |
| 65 | +|-- README.ja # 日本語版インストールマニュアル | |
| 66 | +|-- debian # 仮 | |
| 67 | +| |-- README.Debian | |
| 68 | +| |-- changelog | |
| 69 | +| |-- compat | |
| 70 | +| |-- control | |
| 71 | +| |-- copyright | |
| 72 | +| |-- dirs | |
| 73 | +| |-- docs | |
| 74 | +| |-- performerd.init | |
| 75 | +| |-- postinst | |
| 76 | +| |-- postrm | |
| 77 | +| |-- preinst | |
| 78 | +| |-- prerm | |
| 79 | +| |-- pycompat | |
| 80 | +| |-- rules | |
| 81 | +| |-- schedulerd.init | |
| 82 | +| |-- silhouetted.default | |
| 83 | +| `-- silhouetted.init | |
| 66 | 84 | |-- doc # ドキュメントや設定ファイル関連置き場 |
| 67 | 85 | | |-- epydoc.cfg # epydoc設定ファイル |
| 68 | 86 | | |-- log.conf.example # ログ設定ファイルのテンプレート |
| 69 | 87 | | |-- rc.d # 起動スクリプト |
| 70 | 88 | | | `-- init.d |
| 71 | -| | |-- performerd # パフォーマーーデーモンの起動スクリプト | |
| 72 | -| | |-- schedulerd # スケジューラーデーモンの起動スクリプト | |
| 73 | -| | `-- silhouetted # 監視デーモンの起動スクリプト | |
| 89 | +| | |-- asynperformerd # 並列処理用デーモンの起動スクリプト | |
| 90 | +| | |-- asynschedulerd # 並列処理用スケジュールデーモンの起動スクリプト | |
| 91 | +| | |-- performerd # 逐次処理用デーモンの起動スクリプト | |
| 92 | +| | |-- schedulerd # 逐次処理用スケジュールデーモンの起動スクリプト | |
| 93 | +| | `-- silhouetted # アプリケーション(監視を含む)の起動スクリプト | |
| 74 | 94 | | |-- redhat.spec # RPM用のspecファイル |
| 75 | 95 | | |-- silhouette.conf.example # Pysilhouette設定ファイルのテンプレート |
| 76 | 96 | | |-- sysconfig # システム設定ファイル |
| @@ -84,38 +104,41 @@ PostgreSQLを使用する場合 | ||
| 84 | 104 | | `-- test_success.py |
| 85 | 105 | |-- pysilhouette # プログラム本体 |
| 86 | 106 | | |-- __init__.py |
| 107 | +| |-- asynperformer.py | |
| 108 | +| |-- asynscheduler.py | |
| 87 | 109 | | |-- command.py |
| 88 | 110 | | |-- daemon.py # デーモン化で利用する関数群 |
| 89 | 111 | | |-- db # Database関連 |
| 90 | 112 | | | |-- __init__.py |
| 91 | 113 | | | |-- access.py # Databaseの操作 |
| 92 | -| | `-- model.py # Databaseのテーブルモデル | |
| 114 | +| | |-- model.py # Databaseのテーブルモデル | |
| 115 | +| |-- er.py | |
| 93 | 116 | | |-- log.py |
| 94 | -| |-- performer.py # パフォーマーデーモン(ジョブコマンドを実行する) | |
| 117 | +| |-- performer.py | |
| 95 | 118 | | |-- prep.py # 初期処理で利用する関数群 |
| 96 | -| |-- scheduler.py # スケジューラーデーモン(ジョブコマンドの実行のスケジューリング) | |
| 97 | -| |-- silhouette.py # 監視デーモン(パフォーマーデーモン、スケジューラーデーモンの監視) | |
| 119 | +| |-- scheduler.py | |
| 120 | +| |-- silhouette.py | |
| 98 | 121 | | |-- tests # テスト関連 |
| 99 | 122 | | | |-- __init__.py |
| 100 | 123 | | | |-- suite.py |
| 101 | 124 | | | |-- testprep.py |
| 102 | 125 | | | |-- testutil.py |
| 103 | 126 | | | `-- testworker.py |
| 104 | -| |-- uniqkey.py # ユニークキー | |
| 127 | +| |-- uniqkey.py # Unique key for the instance. | |
| 105 | 128 | | |-- util.py |
| 106 | 129 | | `-- worker.py |
| 107 | -|-- setup.cfg # distutilsを利用したパッケージングをするのに使用する設定ファイル。 | |
| 108 | -|-- setup.py # distutilsを利用したパッケージングをするのに使用する実行ファイル。 | |
| 130 | +|-- setup.cfg # distutilsを利用したパッケージングをするのに使用する設定ファイル | |
| 131 | +|-- setup.py # distutilsを利用したパッケージングをするのに使用する実行ファイル | |
| 109 | 132 | `-- tool # 開発時や運用時に利用するコマンドベースの実行ファイル |
| 110 | - |-- cleanupdb.py # Databaseを初期化する実行ファイル | |
| 111 | 133 | |-- epydoc.sh # Javadoc風なドキュメントを自動生成する実行ファイル |
| 112 | - `-- setjob.py # コマンドラインからジョブコマンドを登録する実行ファイル | |
| 113 | - | |
| 134 | + |-- psil-cleandb # Databaseを初期化する実行ファイル | |
| 135 | + |-- psil-set # コマンドラインからジョブコマンドを登録する実行ファイル | |
| 136 | + `-- sqlite2other.py | |
| 114 | 137 | |
| 115 | 138 | 著作権/ライセンス |
| 116 | 139 | ================================================================================ |
| 117 | 140 | |
| 118 | -Copyright (c) 2009 HDE, Inc. | |
| 141 | +Copyright (c) 2009-2010 HDE, Inc. | |
| 119 | 142 | |
| 120 | 143 | Permission is hereby granted, free of charge, to any person obtaining a copy |
| 121 | 144 | of this software and associated documentation files (the "Software"), to deal |
| @@ -26,7 +26,8 @@ frames: yes | ||
| 26 | 26 | separate-classes: no |
| 27 | 27 | # "classtree", "callgraph", "umlclass", "all" |
| 28 | 28 | graph: all |
| 29 | -dotpath: /usr/bin/dot | |
| 29 | +#dotpath: /usr/bin/dot | |
| 30 | +dotpath: /opt/hde/bin/dot | |
| 30 | 31 | #pstat: profile.out |
| 31 | 32 | graph-font: Helvetica |
| 32 | 33 | graph-font-size: 10 |
| @@ -1,8 +1,8 @@ | ||
| 1 | 1 | [loggers] |
| 2 | -keys=root,pysilhouette,pysilhouette_trace,sqlalchemy.engine | |
| 2 | +keys=root,pysilhouette,pysilhouette_trace,sqlalchemy.engine,sqlalchemy.pool,sqlalchemy.orm,pysilhouette.asynscheduler,pysilhouette.asynperformer | |
| 3 | 3 | |
| 4 | 4 | [handlers] |
| 5 | -keys=default,pysilhouette,pysilhouette_trace,sqlalchemy.engine | |
| 5 | +keys=default,pysilhouette,pysilhouette_trace,sqlalchemy.engine,sqlalchemy.pool,sqlalchemy.orm,pysilhouette.asynscheduler,pysilhouette.asynperformer | |
| 6 | 6 | |
| 7 | 7 | [formatters] |
| 8 | 8 | keys=default,common |
| @@ -36,24 +36,69 @@ class=handlers.RotatingFileHandler | ||
| 36 | 36 | formatter=common |
| 37 | 37 | args=('/var/log/pysilhouette/sql.log', 'a', (5 *1024 *1024), 5) |
| 38 | 38 | |
| 39 | +[handler_sqlalchemy.pool] | |
| 40 | +class=handlers.RotatingFileHandler | |
| 41 | +formatter=common | |
| 42 | +args=('/var/log/pysilhouette/sql.log', 'a', (5 *1024 *1024), 5) | |
| 43 | + | |
| 44 | +[handler_sqlalchemy.orm] | |
| 45 | +class=handlers.RotatingFileHandler | |
| 46 | +formatter=common | |
| 47 | +args=('/var/log/pysilhouette/sql.log', 'a', (5 *1024 *1024), 5) | |
| 48 | + | |
| 49 | +[handler_pysilhouette.asynscheduler] | |
| 50 | +class=handlers.RotatingFileHandler | |
| 51 | +formatter=common | |
| 52 | +args=('/var/log/pysilhouette/application.log', 'a', (5 *1024 *1024), 5) | |
| 53 | + | |
| 54 | +[handler_pysilhouette.asynperformer] | |
| 55 | +class=handlers.RotatingFileHandler | |
| 56 | +formatter=common | |
| 57 | +args=('/var/log/pysilhouette/application.log', 'a', (5 *1024 *1024), 5) | |
| 58 | + | |
| 39 | 59 | [logger_root] |
| 40 | 60 | level=ERROR |
| 41 | 61 | handlers=default |
| 42 | 62 | |
| 43 | 63 | [logger_pysilhouette] |
| 44 | -level=DEBUG | |
| 64 | +level=INFO | |
| 45 | 65 | handlers=pysilhouette |
| 46 | 66 | propagate=0 |
| 47 | 67 | qualname=pysilhouette |
| 48 | 68 | |
| 49 | 69 | [logger_pysilhouette_trace] |
| 50 | -level=DEBUG | |
| 70 | +level=INFO | |
| 51 | 71 | handlers=pysilhouette_trace |
| 52 | 72 | propagate=0 |
| 53 | 73 | qualname=pysilhouette_trace |
| 54 | 74 | |
| 55 | 75 | [logger_sqlalchemy.engine] |
| 56 | -level=DEBUG | |
| 76 | +level=WARNING | |
| 57 | 77 | handlers=sqlalchemy.engine |
| 58 | 78 | propagate=0 |
| 59 | 79 | qualname=sqlalchemy.engine |
| 80 | + | |
| 81 | +[logger_sqlalchemy.pool] | |
| 82 | +level=WARNING | |
| 83 | +handlers=sqlalchemy.pool | |
| 84 | +propagate=0 | |
| 85 | +qualname=sqlalchemy.pool | |
| 86 | + | |
| 87 | +[logger_sqlalchemy.orm] | |
| 88 | +level=INFO | |
| 89 | +handlers=sqlalchemy.orm | |
| 90 | +propagate=0 | |
| 91 | +qualname=sqlalchemy.orm | |
| 92 | + | |
| 93 | +[logger_pysilhouette.asynscheduler] | |
| 94 | +level=WARNING | |
| 95 | +handlers=pysilhouette.asynscheduler | |
| 96 | +propagate=0 | |
| 97 | +qualname=pysilhouette.asynscheduler | |
| 98 | + | |
| 99 | +[logger_pysilhouette.asynperformer] | |
| 100 | +level=WARNING | |
| 101 | +handlers=pysilhouette.asynperformer | |
| 102 | +propagate=0 | |
| 103 | +qualname=pysilhouette.asynperformer | |
| 104 | + |
| @@ -0,0 +1,78 @@ | ||
| 1 | +#!/bin/bash | |
| 2 | +# | |
| 3 | +# asynperformerd The stop script of the child process(asynperformer.py) of the Pysilhouette system. | |
| 4 | +# | |
| 5 | +# processname: asynperformerd | |
| 6 | +# pidfile: /var/run/asynperformerd | |
| 7 | +# lockfile: /var/lock/subsys/asynperformerd | |
| 8 | + | |
| 9 | +source /etc/rc.d/init.d/functions | |
| 10 | + | |
| 11 | +# Default value | |
| 12 | +prog="asynperformer" | |
| 13 | +progd="asynperformerd" | |
| 14 | +stop_code='2' | |
| 15 | +fifo='/tmp/pysilhouette-asynperformer.fifo' | |
| 16 | + | |
| 17 | +sysconfig="/etc/sysconfig/${progd}" | |
| 18 | + | |
| 19 | +if [ "x${PYTHON}" = "x" ]; then | |
| 20 | + PYTHON=`which python` | |
| 21 | +fi | |
| 22 | + | |
| 23 | +# Process id file. | |
| 24 | +pidfile="/var/run/${progd}.pid" | |
| 25 | + | |
| 26 | +desc="${progd} (Daemon)" | |
| 27 | + | |
| 28 | +stop() { | |
| 29 | + echo -n $"Shutting down $desc: " | |
| 30 | + if [ ! -e ${pidfile} ]; then | |
| 31 | + echo "not running..." | |
| 32 | + return 1 | |
| 33 | + fi | |
| 34 | + pid=`cat ${pidfile}` | |
| 35 | + if [ "x${pid}" == "x" ]; then | |
| 36 | + echo "not running... - not pid" | |
| 37 | + rm -f ${pidfile} | |
| 38 | + return 1 | |
| 39 | + fi | |
| 40 | + [ -w "${fifo}" ] && echo -n $stop_code >> ${fifo} | |
| 41 | + RETVAL=$? | |
| 42 | + if [ $RETVAL -eq 0 ]; then | |
| 43 | + success | |
| 44 | + else | |
| 45 | + failure | |
| 46 | + fi | |
| 47 | + echo | |
| 48 | + return $RETVAL | |
| 49 | +} | |
| 50 | + | |
| 51 | +err_msg="Please execute \"/etc/rc.d/init.d/silhouetted $1\"" | |
| 52 | + | |
| 53 | +case "$1" in | |
| 54 | + start) | |
| 55 | + echo ${err_msg} | |
| 56 | + RETVAL=1 | |
| 57 | + ;; | |
| 58 | + stop) | |
| 59 | + stop | |
| 60 | + ;; | |
| 61 | + restart|reload) | |
| 62 | + echo ${err_msg} | |
| 63 | + RETVAL=1 | |
| 64 | + ;; | |
| 65 | + condrestart) | |
| 66 | + echo ${err_msg} | |
| 67 | + RETVAL=1 | |
| 68 | + ;; | |
| 69 | + status) | |
| 70 | + status ${progd} | |
| 71 | + RETVAL=$? | |
| 72 | + ;; | |
| 73 | + *) | |
| 74 | + echo $"Usage: $0 {stop|status}" | |
| 75 | + RETVAL=1 | |
| 76 | +esac | |
| 77 | + | |
| 78 | +exit $RETVAL |
| @@ -0,0 +1,77 @@ | ||
| 1 | +#!/bin/bash | |
| 2 | +# | |
| 3 | +# asynschedulerd The stop script of the child process(asynscheduler.py) of the Pysilhouette system. | |
| 4 | +# | |
| 5 | +# processname: asynschedulerd | |
| 6 | +# pidfile: /var/run/asynschedulerd.pid | |
| 7 | +# lockfile: /var/lock/subsys/asynschedulerd | |
| 8 | + | |
| 9 | +source /etc/rc.d/init.d/functions | |
| 10 | + | |
| 11 | +# Default value | |
| 12 | +prog="asynscheduler" | |
| 13 | +progd="asynschedulerd" | |
| 14 | + | |
| 15 | +sysconfig="/etc/sysconfig/${progd}" | |
| 16 | + | |
| 17 | +if [ "x${PYTHON}" = "x" ]; then | |
| 18 | + PYTHON=`which python` | |
| 19 | +fi | |
| 20 | + | |
| 21 | +# Process id file. | |
| 22 | +pidfile="/var/run/${progd}.pid" | |
| 23 | + | |
| 24 | +desc="${progd} (Daemon)" | |
| 25 | + | |
| 26 | +stop() { | |
| 27 | + echo -n $"Shutting down $desc: " | |
| 28 | + if [ ! -e ${pidfile} ]; then | |
| 29 | + echo "not running..." | |
| 30 | + return 1 | |
| 31 | + fi | |
| 32 | +# pid=`cat ${pidfile}` | |
| 33 | +# if [ "x${pid}" == "x" ]; then | |
| 34 | +# echo "not running... - not pid" | |
| 35 | +# rm -f ${pidfile} | |
| 36 | +# return 1 | |
| 37 | +# fi | |
| 38 | + killproc -p ${pidfile} -15 | |
| 39 | +# kill ${pid} | |
| 40 | + RETVAL=$? | |
| 41 | + if [ $RETVAL -eq 0 ]; then | |
| 42 | + success | |
| 43 | + else | |
| 44 | + failure | |
| 45 | + fi | |
| 46 | + echo | |
| 47 | + return $RETVAL | |
| 48 | +} | |
| 49 | + | |
| 50 | +err_msg="Please execute \"/etc/rc.d/init.d/silhouetted $1\"" | |
| 51 | + | |
| 52 | +case "$1" in | |
| 53 | + start) | |
| 54 | + echo ${err_msg} | |
| 55 | + RETVAL=1 | |
| 56 | + ;; | |
| 57 | + stop) | |
| 58 | + stop | |
| 59 | + ;; | |
| 60 | + restart|reload) | |
| 61 | + echo ${err_msg} | |
| 62 | + RETVAL=1 | |
| 63 | + ;; | |
| 64 | + condrestart) | |
| 65 | + echo ${err_msg} | |
| 66 | + RETVAL=1 | |
| 67 | + ;; | |
| 68 | + status) | |
| 69 | + status ${progd} | |
| 70 | + RETVAL=$? | |
| 71 | + ;; | |
| 72 | + *) | |
| 73 | + echo $"Usage: $0 {stop|status}" | |
| 74 | + RETVAL=1 | |
| 75 | +esac | |
| 76 | + | |
| 77 | +exit $RETVAL |
| @@ -12,7 +12,7 @@ source /etc/rc.d/init.d/functions | ||
| 12 | 12 | prog="performer" |
| 13 | 13 | progd="performerd" |
| 14 | 14 | stop_code='2' |
| 15 | -fifo='/tmp/pysilhouette.fifo' | |
| 15 | +fifo='/tmp/pysilhouette-performer.fifo' | |
| 16 | 16 | |
| 17 | 17 | sysconfig="/etc/sysconfig/${progd}" |
| 18 | 18 |
| @@ -10,9 +10,15 @@ | ||
| 10 | 10 | # pidfile: /var/run/silhouetted.pid |
| 11 | 11 | # /var/run/schedulerd.pid |
| 12 | 12 | # /var/run/performerd.pid |
| 13 | +# /var/run/asynperformerd.pid | |
| 14 | +# /var/run/asynschedulerd.pid | |
| 15 | +# | |
| 13 | 16 | # lockfile: /var/lock/subsys/silhouetted |
| 14 | 17 | # /var/lock/subsys/schedulerd |
| 15 | 18 | # /var/lock/subsys/performerd |
| 19 | +# /var/lock/subsys/asynperformerd | |
| 20 | +# /var/lock/subsys/asynschedulerd | |
| 21 | +# | |
| 16 | 22 | |
| 17 | 23 | source /etc/rc.d/init.d/functions |
| 18 | 24 | source /etc/sysconfig/network |
| @@ -33,6 +39,8 @@ progd="silhouetted" | ||
| 33 | 39 | app="pysilhouette" |
| 34 | 40 | sch_progd='schedulerd' |
| 35 | 41 | per_progd='performerd' |
| 42 | +asyn_sch_progd='asynschedulerd' | |
| 43 | +asyn_per_progd='asynperformerd' | |
| 36 | 44 | |
| 37 | 45 | sysconfig="/etc/sysconfig/${progd}" |
| 38 | 46 |
| @@ -53,6 +61,10 @@ sch_pidfile="/var/run/${sch_progd}.pid" | ||
| 53 | 61 | sch_lockfile="/var/lock/subsys/${sch_progd}" |
| 54 | 62 | per_pidfile="/var/run/${per_progd}.pid" |
| 55 | 63 | per_lockfile="/var/lock/subsys/${per_progd}" |
| 64 | +asyn_sch_pidfile="/var/run/${asyn_sch_progd}.pid" | |
| 65 | +asyn_sch_lockfile="/var/lock/subsys/${asyn_sch_progd}" | |
| 66 | +asyn_per_pidfile="/var/run/${asyn_per_progd}.pid" | |
| 67 | +asyn_per_lockfile="/var/lock/subsys/${asyn_per_progd}" | |
| 56 | 68 | |
| 57 | 69 | # Daemon mode. |
| 58 | 70 | extra_args="" |
| @@ -78,15 +90,15 @@ start() { | ||
| 78 | 90 | return 1 |
| 79 | 91 | fi |
| 80 | 92 | |
| 81 | - touch ${pidfile} ${sch_pidfile} ${per_pidfile} | |
| 82 | - chown ${USER}:${GROUP} ${pidfile} ${sch_pidfile} ${per_pidfile} | |
| 93 | + touch ${pidfile} ${sch_pidfile} ${per_pidfile} ${asyn_sch_pidfile} ${asyn_per_pidfile} | |
| 94 | + chown ${USER}:${GROUP} ${pidfile} ${sch_pidfile} ${per_pidfile} ${asyn_sch_pidfile} ${asyn_per_pidfile} | |
| 83 | 95 | if [ "x${PYTHON_SEARCH_PATH}" != "x" ]; then |
| 84 | 96 | env="PYTHONPATH=${PYTHON_SEARCH_PATH}:\$PYTHONPATH" |
| 85 | 97 | fi |
| 86 | 98 | ${SU} -l ${USER} -c "${env} ${PYTHON} ${PREFIX}/opt/pysilhouette/bin/${prog}.py ${CMD_ARGS}" |
| 87 | 99 | RETVAL=$? |
| 88 | 100 | if [ ${RETVAL} -eq 0 ]; then |
| 89 | - touch ${lockfile} ${sch_lockfile} ${per_lockfile} | |
| 101 | + touch ${lockfile} ${sch_lockfile} ${per_lockfile} ${asyn_sch_lockfile} ${asyn_per_lockfile} | |
| 90 | 102 | success |
| 91 | 103 | else |
| 92 | 104 | failure |
| @@ -120,24 +132,46 @@ stop() { | ||
| 120 | 132 | if [ ${SIL_RETVAL} -eq 0 ]; then |
| 121 | 133 | rm -f ${lockfile} |
| 122 | 134 | rm -f ${pidfile} |
| 123 | - fi | |
| 135 | + fi | |
| 136 | + | |
| 137 | + # scheduler | |
| 124 | 138 | eval "/etc/rc.d/init.d/${sch_progd} stop" |
| 125 | 139 | SCH_RETVAL=$? |
| 126 | 140 | if [ ${SCH_RETVAL} -eq 0 ]; then |
| 127 | 141 | rm -f ${sch_lockfile} |
| 128 | 142 | rm -f ${sch_pidfile} |
| 129 | - fi | |
| 143 | + fi | |
| 144 | + | |
| 145 | + # performer | |
| 130 | 146 | eval "/etc/rc.d/init.d/${per_progd} stop" |
| 131 | 147 | PER_RETVAL=$? |
| 132 | 148 | if [ ${PER_RETVAL} -eq 0 ]; then |
| 133 | 149 | rm -f ${per_lockfile} |
| 134 | 150 | rm -f ${per_pidfile} |
| 135 | - fi | |
| 151 | + fi | |
| 152 | + | |
| 153 | + # asynscheduler | |
| 154 | + eval "/etc/rc.d/init.d/${asyn_sch_progd} stop" | |
| 155 | + ASYN_SCH_RETVAL=$? | |
| 156 | + if [ ${ASYN_SCH_RETVAL} -eq 0 ]; then | |
| 157 | + rm -f ${asyn_sch_lockfile} | |
| 158 | + rm -f ${asyn_sch_pidfile} | |
| 159 | + fi | |
| 160 | + | |
| 161 | + # asynperformer | |
| 162 | + eval "/etc/rc.d/init.d/${asyn_per_progd} stop" | |
| 163 | + ASYN_PER_RETVAL=$? | |
| 164 | + if [ ${ASYN_PER_RETVAL} -eq 0 ]; then | |
| 165 | + rm -f ${asyn_per_lockfile} | |
| 166 | + rm -f ${asyn_per_pidfile} | |
| 167 | + fi | |
| 168 | + | |
| 136 | 169 | # The return code of the performer daemon is the first digit. |
| 137 | 170 | # The return code of the scheduler daemon is the second digit. |
| 138 | 171 | # The return code of the silhouetted daemon is the third digit. |
| 139 | 172 | # All stop functions return only the exit code of 0(Normal) or 1(Abnormal). |
| 140 | - RETVAL=`expr ${SIL_RETVAL} \* 100 + ${SCH_RETVAL} \* 10 + ${PER_RETVAL}` | |
| 173 | + #RETVAL=`expr ${SIL_RETVAL} \* 100 + ${SCH_RETVAL} \* 10 + ${PER_RETVAL}` | |
| 174 | + RETVAL=`expr ${SIL_RETVAL} \* 10000 + ${SCH_RETVAL} \* 1000 + ${PER_RETVAL} \* 100 + ${ASYN_SCH_RETVAL} \* 10 + ${ASYN_PER_RETVAL}` | |
| 141 | 175 | return ${RETVAL} |
| 142 | 176 | } |
| 143 | 177 |
| @@ -166,6 +200,8 @@ case "$1" in | ||
| 166 | 200 | status ${progd} |
| 167 | 201 | eval "/etc/rc.d/init.d/${sch_progd} status" |
| 168 | 202 | eval "/etc/rc.d/init.d/${per_progd} status" |
| 203 | + eval "/etc/rc.d/init.d/${asyn_sch_progd} status" | |
| 204 | + eval "/etc/rc.d/init.d/${asyn_per_progd} status" | |
| 169 | 205 | RETVAL=$? |
| 170 | 206 | ;; |
| 171 | 207 | *) |
| @@ -1,6 +1,6 @@ | ||
| 1 | 1 | %define name pysilhouette |
| 2 | -%define version 0.6.3 | |
| 3 | -%define release 2 | |
| 2 | +%define version 0.7.0 | |
| 3 | +%define release 1 | |
| 4 | 4 | %define date %(echo `LANG=C date +%%Y%%m%%d%%H%%M%%S`) |
| 5 | 5 | |
| 6 | 6 | %define _prefix /opt |
| @@ -22,7 +22,7 @@ | ||
| 22 | 22 | %define _uid_max 350 |
| 23 | 23 | |
| 24 | 24 | Summary: A python-based background job manager |
| 25 | -Summary(ja): オープンソースのバックグラウンドジョブマネージャー | |
| 25 | +Summary(ja): オープンソースバックグラウンドジョブマネージャー | |
| 26 | 26 | Name: %{name} |
| 27 | 27 | Version: %{version} |
| 28 | 28 | #Release: %{release}.%{date} |
| @@ -71,8 +71,10 @@ install -c -m 644 doc/whitelist.conf.example $RPM_BUILD_ROOT%{__sysconfdir}/whit | ||
| 71 | 71 | install -c -m 644 doc/rc.d/init.d/* $RPM_BUILD_ROOT%{_initrddir}/ |
| 72 | 72 | install -c -m 644 doc/sysconfig/%{__progd} $RPM_BUILD_ROOT/etc/sysconfig/%{__progd} |
| 73 | 73 | |
| 74 | -chmod +x $RPM_BUILD_ROOT%{python_sitelib}/%{__app}/%{__prog}.py | |
| 75 | -%{__ln_s} %{python_sitelib}/%{__app}/%{__prog}.py $RPM_BUILD_ROOT%{__bindir} | |
| 74 | +install -c -m 744 tool/psil-cleandb $RPM_BUILD_ROOT%{__bindir} | |
| 75 | +install -c -m 744 tool/psil-set $RPM_BUILD_ROOT%{__bindir} | |
| 76 | + | |
| 77 | +chmod +x %{__app}/%{__prog}.py | |
| 76 | 78 | |
| 77 | 79 | %clean |
| 78 | 80 | rm -rf $RPM_BUILD_ROOT |
| @@ -111,6 +113,8 @@ if [ $? -ne 0 ]; then | ||
| 111 | 113 | fi |
| 112 | 114 | |
| 113 | 115 | %post |
| 116 | +#echo "%{__ln_s} %{python_sitelib}/%{__app}/%{__prog}.py %{__bindir}" | |
| 117 | +%{__ln_s} %{python_sitelib}/%{__app}/%{__prog}.py %{__bindir} | |
| 114 | 118 | if [ ! -e %{__datadir} ]; then |
| 115 | 119 | mkdir -p %{__datadir} 2> /dev/null |
| 116 | 120 | fi |
| @@ -119,6 +123,7 @@ fi | ||
| 119 | 123 | #%{_initrddir}/silhouetted start >/dev/null 2>&1 |
| 120 | 124 | |
| 121 | 125 | %postun |
| 126 | +rm -f %{__bindir}/%{__prog}.py | |
| 122 | 127 | if [ $1 = 0 ]; then |
| 123 | 128 | /usr/sbin/userdel %{_user} 2> /dev/null || : |
| 124 | 129 | /usr/sbin/groupdel %{_group} 2> /dev/null || : |
| @@ -138,12 +143,26 @@ fi | ||
| 138 | 143 | %attr(0644, root, root) %config(noreplace) %{__sysconfdir}/whitelist.conf |
| 139 | 144 | %attr(0644, root, root) %{__sysconfdir}/whitelist.conf.example |
| 140 | 145 | %attr(0644, root, root) %config(noreplace) /etc/sysconfig/%{__progd} |
| 141 | -%{__bindir}/%{__prog}.py | |
| 146 | +#%{__bindir}/%{__prog}.py | |
| 147 | +%{__bindir}/psil-cleandb | |
| 148 | +%{__bindir}/psil-set | |
| 142 | 149 | %dir /var/log/%{__app} |
| 143 | 150 | |
| 144 | 151 | %changelog |
| 145 | -* Fri Jun 04 2010 Kei Funagayama <kei.topaz@gmail.com> - 0.6.3-2 | |
| 146 | -- bugfix: http://list.karesansui-project.info/pipermail/karesansui-users-jp/2010-June/000063.html | |
| 152 | +* Fri Jul 29 2010 Kei Funagayama <kei.topaz@gmail.com> - 0.7.0-1 | |
| 153 | +- 0.7.0 released. | |
| 154 | + | |
| 155 | +* Tue Jun 29 2010 Kei Funagayama <kei.topaz@gmail.com> - 0.7.0-1beta3 | |
| 156 | +- 0.7.0 beta3 released. | |
| 157 | + | |
| 158 | +* Fri Apr 09 2010 Kei Funagayama <kei.topaz@gmail.com> - 0.7.0-1beta2 | |
| 159 | +- 0.7.0 beta2 released. | |
| 160 | + | |
| 161 | +* Fri Apr 09 2010 Kei Funagayama <kei.topaz@gmail.com> - 0.7.0-1beta1 | |
| 162 | +- 0.7.0 beta1 released. | |
| 163 | + | |
| 164 | +* Wed Jan 03 2010 Kei Funagayama <kei@karesansui-project.info> - 0.7.0-1alpha1 | |
| 165 | +- 0.7.0 alpha1 released. | |
| 147 | 166 | |
| 148 | 167 | * Tue Aug 04 2009 Kei Funagayama <kei@karesansui-project.info> - 0.6.3-1 |
| 149 | 168 | - Add Command Tools. |
| @@ -161,4 +180,3 @@ fi | ||
| 161 | 180 | |
| 162 | 181 | * Tue Feb 4 2009 HDE Package Maintainer <info@hde.co.jp> - 0.1.0-1 |
| 163 | 182 | - Initial build. |
| 164 | - |
| @@ -18,31 +18,49 @@ daemon.stderr=/var/log/pysilhouette/stderr.log | ||
| 18 | 18 | observer.target.python=/usr/bin/python |
| 19 | 19 | observer.target.scheduler=/opt/pysilhouette/lib/python/pysilhouette/scheduler.py |
| 20 | 20 | observer.target.performer=/opt/pysilhouette/lib/python/pysilhouette/performer.py |
| 21 | +observer.target.asynscheduler=/opt/pysilhouette/lib/python/pysilhouette/asynscheduler.py | |
| 22 | +observer.target.asynperformer=/opt/pysilhouette/lib/python/pysilhouette/asynperformer.py | |
| 21 | 23 | observer.restart.count=5 |
| 22 | 24 | # - Clear intervals. 0=Infinite |
| 23 | -observer.restart.count.clear.time=300 | |
| 25 | +observer.restart.count.clear.time=600 | |
| 24 | 26 | # - Check interval |
| 25 | -observer.check.interval=5 | |
| 27 | +observer.check.interval=300 | |
| 26 | 28 | # - Output status information |
| 27 | 29 | observer.status.path=/var/opt/pysilhouette/status |
| 28 | -# - mkfifo | |
| 29 | -observer.mkfifo.path=/tmp/pysilhouette.fifo | |
| 30 | -observer.mkfifo.start.code=0 | |
| 31 | -observer.mkfifo.ignore.code=1 | |
| 32 | -observer.mkfifo.stop.code=2 | |
| 33 | -observer.mkfifo.user.name=pysilhouette | |
| 34 | -observer.mkfifo.group.name=pysilhouette | |
| 35 | -observer.mkfifo.perms=0666 | |
| 30 | + | |
| 31 | +## | |
| 32 | +# performer | |
| 33 | +performer.mkfifo.group.name=pysilhouette | |
| 34 | +performer.mkfifo.stop.code=2 | |
| 35 | +performer.mkfifo.ignore.code=1 | |
| 36 | +performer.mkfifo.path=/tmp/pysilhouette-performer.fifo | |
| 37 | +performer.mkfifo.start.code=0 | |
| 38 | +performer.mkfifo.user.name=pysilhouette | |
| 39 | +performer.mkfifo.perms=0666 | |
| 40 | + | |
| 41 | +## | |
| 42 | +# asynperformer | |
| 43 | +asynperformer.mkfifo.group.name=pysilhouette | |
| 44 | +asynperformer.mkfifo.stop.code=2 | |
| 45 | +asynperformer.mkfifo.ignore.code=1 | |
| 46 | +asynperformer.mkfifo.path=/tmp/pysilhouette-asynperformer.fifo | |
| 47 | +asynperformer.mkfifo.start.code=0 | |
| 48 | +asynperformer.mkfifo.user.name=pysilhouette | |
| 49 | +asynperformer.mkfifo.perms=0666 | |
| 50 | +asynscheduler.interval=10 | |
| 51 | +asynperformer.thread.pool.size=5 | |
| 36 | 52 | |
| 37 | 53 | ## |
| 38 | 54 | # scheduler |
| 39 | 55 | scheduler.interval=10 |
| 40 | 56 | |
| 57 | + | |
| 41 | 58 | ## |
| 42 | 59 | # job |
| 43 | 60 | job.popen.env.lang=C |
| 44 | 61 | job.popen.timeout=3600 |
| 45 | -job.popen.waittime=10 | |
| 62 | +job.popen.waittime=1 | |
| 63 | +job.popen.output.limit=1048576 | |
| 46 | 64 | # 1 or Other |
| 47 | 65 | job.whitelist.flag=1 |
| 48 | 66 | job.whitelist.path=/etc/opt/pysilhouette/whitelist.conf |
| @@ -50,27 +68,28 @@ job.whitelist.path=/etc/opt/pysilhouette/whitelist.conf | ||
| 50 | 68 | ## |
| 51 | 69 | # Database RFC-1738 style URLs. |
| 52 | 70 | # - driver://username:password@host:port/database |
| 53 | -# | |
| 71 | +# | |
| 54 | 72 | # postgresql : |
| 55 | -# database.url=postgres://silhouette:<password>@localhost:5432/silhouette | |
| 73 | +# database.url=postgres://silhouette:<password>@localhost:5432/silhouette | |
| 56 | 74 | # |
| 57 | -# mysql : | |
| 58 | -# database.url=mysql://localhost/silhouette | |
| 59 | -# or | |
| 60 | -# database.url=mysql://silhouette:<password>@localhost/silhouette | |
| 75 | +# mysql : | |
| 76 | +# database.url=mysql://localhost/silhouette | |
| 77 | +# or | |
| 78 | +# database.url=mysql://silhouette:<password>@localhost/silhouette | |
| 61 | 79 | # |
| 62 | 80 | # oracle |
| 63 | 81 | # database.url=oracle://scott:tiger@dsn - TNS |
| 64 | -# database.url=oracle://scott:tiger@127.0.0.1:1521/sidname - host/port/SID | |
| 82 | +# database.url=oracle://scott:tiger@127.0.0.1:1521/sidname - host/port/SID | |
| 65 | 83 | # |
| 66 | 84 | # sqlite |
| 67 | 85 | # database.url=sqlite:////absolute/path/to/silhouette.db - absolute path |
| 68 | -# database.url=sqlite:///relative/path/to/silhouette.db - relative path | |
| 69 | -# database.url=sqlite:// - in memory | |
| 70 | -# database.url=sqlite://:memory: - in memory | |
| 86 | +# database.url=sqlite:///relative/path/to/silhouette.db - relative path | |
| 87 | +# database.url=sqlite:// - in memory | |
| 88 | +# database.url=sqlite://:memory: - in memory | |
| 71 | 89 | # |
| 72 | 90 | #database.url=sqlite:///:memory: |
| 73 | 91 | database.url=sqlite:////var/opt/pysilhouette/pysilhouette.db |
| 74 | 92 | database.pool.status=0 |
| 75 | 93 | database.pool.max.overflow=10 |
| 76 | 94 | database.pool.size=1 |
| 95 | + |
| @@ -3,7 +3,7 @@ | ||
| 3 | 3 | # |
| 4 | 4 | # This file is part of Pysilhouette. |
| 5 | 5 | # |
| 6 | -# Copyright (c) 2009 HDE, Inc. | |
| 6 | +# Copyright (c) 2009-2010 HDE, Inc. | |
| 7 | 7 | # |
| 8 | 8 | # Permission is hereby granted, free of charge, to any person obtaining a copy |
| 9 | 9 | # of this software and associated documentation files (the "Software"), to deal |
| @@ -28,9 +28,13 @@ | ||
| 28 | 28 | @author: Kei Funagayama <kei@karesansui-project.info> |
| 29 | 29 | """ |
| 30 | 30 | |
| 31 | -__version__ = '0.6' | |
| 32 | -__release__ = '3' | |
| 31 | +__version__ = '0.7' | |
| 32 | +__release__ = '0' | |
| 33 | 33 | __app__ = 'pysilhouette' |
| 34 | 34 | |
| 35 | +# DEFINE | |
| 36 | +PROCERROR = 1 | |
| 37 | +PROCSUCCESS = 0 | |
| 38 | + | |
| 35 | 39 | class SilhouetteException(StandardError): |
| 36 | 40 | pass |
| @@ -0,0 +1,163 @@ | ||
| 1 | +#!/usr/bin/env python | |
| 2 | +# -*- coding: utf-8 -*- | |
| 3 | +# | |
| 4 | +# This file is part of Pysilhouette. | |
| 5 | +# | |
| 6 | +# Copyright (c) 2009-2010 HDE, Inc. | |
| 7 | +# | |
| 8 | +# Permission is hereby granted, free of charge, to any person obtaining a copy | |
| 9 | +# of this software and associated documentation files (the "Software"), to deal | |
| 10 | +# in the Software without restriction, including without limitation the rights | |
| 11 | +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | |
| 12 | +# copies of the Software, and to permit persons to whom the Software is | |
| 13 | +# furnished to do so, subject to the following conditions: | |
| 14 | +# | |
| 15 | +# The above copyright notice and this permission notice shall be included in | |
| 16 | +# all copies or substantial portions of the Software. | |
| 17 | +# | |
| 18 | +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
| 19 | +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
| 20 | +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | |
| 21 | +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |
| 22 | +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | |
| 23 | +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN | |
| 24 | +# THE SOFTWARE. | |
| 25 | +# | |
| 26 | + | |
| 27 | +""" | |
| 28 | +@author: Kei Funagayama <kei@karesansui-project.info> | |
| 29 | +""" | |
| 30 | +import sys | |
| 31 | +import os | |
| 32 | +import logging | |
| 33 | +import traceback | |
| 34 | +import signal | |
| 35 | +import Queue | |
| 36 | + | |
| 37 | +from pysilhouette import PROCERROR, PROCSUCCESS | |
| 38 | +from pysilhouette.log import reload_conf | |
| 39 | +from pysilhouette.prep import readconf, getopts, chkopts, parse_conf | |
| 40 | +from pysilhouette.db.model import JOBGROUP_STATUS, JOBGROUP_TYPE | |
| 41 | +from pysilhouette.db.access import jobgroup_findbytype_limit_status, jobgroup_update | |
| 42 | +from pysilhouette.er import ER | |
| 43 | +from pysilhouette.worker import ThreadQueue, ThreadWorker, dummy_set_job | |
| 44 | +from pysilhouette.db import create_database, Database | |
| 45 | + | |
| 46 | +# var | |
| 47 | +asynpool = [] | |
| 48 | + | |
| 49 | +class AsynPerformer(ER): | |
| 50 | + """AsynPerformer Class | |
| 51 | + """ | |
| 52 | + def __init__(self, opts, cf): | |
| 53 | + ER.__init__(self, opts, cf) | |
| 54 | + self._fifo('asynperformer') | |
| 55 | + self._setdaemon() | |
| 56 | + self.db = create_database(cf) | |
| 57 | + | |
| 58 | + def process(self): | |
| 59 | + self.logger.info('asynperformer : [started]') | |
| 60 | + | |
| 61 | + # thread pool | |
| 62 | + request_queue = Queue.Queue() | |
| 63 | + #response_queue = Queue.Queue() | |
| 64 | + response_list = [] | |
| 65 | + tq = ThreadQueue(request_queue, response_list) | |
| 66 | + | |
| 67 | + while True: | |
| 68 | + fp = open(self.cf["asynperformer.mkfifo.path"], 'r') | |
| 69 | + try: | |
| 70 | + code = fp.read() | |
| 71 | + finally: | |
| 72 | + fp.close() | |
| 73 | + | |
| 74 | + #self.logger.info('Received code from the FIFO file. - code=%s' % code) | |
| 75 | + session = self.db.get_session() | |
| 76 | + | |
| 77 | + # Pending JobGroup search | |
| 78 | + if self.cf['asynperformer.thread.pool.size'] <= tq.now_alive(): | |
| 79 | + continue | |
| 80 | + | |
| 81 | + m_jgs = jobgroup_findbytype_limit_status(session, | |
| 82 | + JOBGROUP_TYPE['PARALLEL'], | |
| 83 | + self.cf['asynperformer.thread.pool.size'] - tq.now_alive()) | |
| 84 | + | |
| 85 | + session.close() | |
| 86 | + #self.logger.info('Queued the Job Group from the database. - Number of JobGroup=%d' % len(m_jgs)) | |
| 87 | + #self.logger.debug('filo code=%s, cf asynperformer.mkfifo.start.code=%s' % (code, self.cf["asynperformer.mkfifo.start.code"])) | |
| 88 | + self.logger.info('Activity Information. - [fifo_code=%s, type=serial, jobgroup_num=%d]' % (code, len(m_jgs))) | |
| 89 | + if code == self.cf["asynperformer.mkfifo.start.code"]: | |
| 90 | + if 0 < len(m_jgs): | |
| 91 | + for m_jg in m_jgs: | |
| 92 | + try: | |
| 93 | + tq.put(ThreadWorker(self.cf, self.db, m_jg.id)) # thread worker!! start | |
| 94 | + except Exception, e: | |
| 95 | + self.logger.debug('Failed to perform the job group. Exceptions are not expected. - jobgroup_id=%d : %s' | |
| 96 | + % (m_jg.id, str(e.args))) | |
| 97 | + print >>sys.stderr, traceback.format_exc() | |
| 98 | + t_logger = logging.getLogger('pysilhouette_traceback') | |
| 99 | + t_logger.error(traceback.format_exc()) | |
| 100 | + else: | |
| 101 | + self.logger.debug('No Job Group.') | |
| 102 | + elif code == self.cf["asynperformer.mkfifo.stop.code"]: | |
| 103 | + self.logger.warning('Received stop code from the FIFO file. - code=%s' % code) | |
| 104 | + break | |
| 105 | + else: | |
| 106 | + self.logger.warning('Received illegal code from the FIFO file. - code=%s' % code) | |
| 107 | + | |
def sigterm_handler(signum, frame):
    """SIGTERM handler: forward the signal to pooled children, then exit.

    @param signum: signal number delivered by the kernel
    @param frame: current stack frame (unused)
    """
    logger = logging.getLogger('pysilhouette.asynperformer')
    logger.info('Stop the AsynPerformerd with signal - pid=%s, signal=%s' % (os.getpid(), signum))
    # Forward the signal to every tracked child process.
    # NOTE(review): asynpool is never populated in this module, so this loop
    # looks like a no-op -- confirm whether workers should register here.
    for x in asynpool :
        os.kill(x.pid, signum)
    # NOTE(review): exiting with the pid yields pid % 256 as the exit status;
    # a conventional 0/1 status may have been intended -- confirm.
    sys.exit(os.getpid())
| 114 | + | |
def main():
    """Entry point for the asynperformer daemon.

    Parses options, loads configuration and logging setup, installs the
    SIGTERM handler and runs the AsynPerformer loop.

    @return: process exit status (PROCSUCCESS on clean stop, PROCERROR on
             any configuration or runtime failure)
    """
    (opts, args) = getopts()
    if chkopts(opts) is True:
        return PROCERROR

    cf = readconf(opts.config)
    if cf is None:
        print >>sys.stderr, 'Failed to load the config file "%s". (%s)' % (opts.config, sys.argv[0])
        return PROCERROR

    # conf parse
    if parse_conf(cf) is False:
        return PROCERROR

    # Export the config path so that spawned job code can locate it.
    # set env=PYSILHOUETTE_CONF
    os.environ['PYSILHOUETTE_CONF'] = opts.config

    if reload_conf(cf["env.sys.log.conf.path"]):
        logger = logging.getLogger('pysilhouette.asynperformer')
    else:
        print >>sys.stderr, 'Failed to load the log file. (%s)' % sys.argv[0]
        return PROCERROR

    try:
        try:
            signal.signal(signal.SIGTERM, sigterm_handler)
            asynperformer = AsynPerformer(opts, cf)
            ret = asynperformer.process() # start!!
            return ret
        except KeyboardInterrupt, k:
            logger.critical('Keyboard interrupt occurred. - %s' % str(k.args))
            print >>sys.stderr, 'Keyboard interrupt occurred. - %s' % str(k.args)
        except Exception, e:
            logger.critical('System error has occurred. - %s' % str(e.args))
            print >>sys.stderr, 'System error has occurred. - %s' % str(e.args)
            print >>sys.stderr, traceback.format_exc()
            t_logger = logging.getLogger('pysilhouette_traceback')
            t_logger.critical(traceback.format_exc())

    finally:
        # Remove the pidfile on the way out when running as a daemon.
        if opts.daemon is True and os.path.isfile(opts.pidfile):
            os.unlink(opts.pidfile)
            logger.info('Process file has been deleted.. - pidfile=%s' % opts.pidfile)

    # Reached only when one of the exception handlers above fired.
    return PROCERROR


if __name__ == '__main__':
    sys.exit(main())
| @@ -0,0 +1,128 @@ | ||
| 1 | +#!/usr/bin/env python | |
| 2 | +# -*- coding: utf-8 -*- | |
| 3 | +# | |
| 4 | +# This file is part of Pysilhouette. | |
| 5 | +# | |
| 6 | +# Copyright (c) 2009-2010 HDE, Inc. | |
| 7 | +# | |
| 8 | +# Permission is hereby granted, free of charge, to any person obtaining a copy | |
| 9 | +# of this software and associated documentation files (the "Software"), to deal | |
| 10 | +# in the Software without restriction, including without limitation the rights | |
| 11 | +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | |
| 12 | +# copies of the Software, and to permit persons to whom the Software is | |
| 13 | +# furnished to do so, subject to the following conditions: | |
| 14 | +# | |
| 15 | +# The above copyright notice and this permission notice shall be included in | |
| 16 | +# all copies or substantial portions of the Software. | |
| 17 | +# | |
| 18 | +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
| 19 | +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
| 20 | +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | |
| 21 | +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |
| 22 | +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | |
| 23 | +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN | |
| 24 | +# THE SOFTWARE. | |
| 25 | +# | |
| 26 | + | |
| 27 | +""" | |
| 28 | +@author: Kei Funagayama <kei@karesansui-project.info> | |
| 29 | +""" | |
| 30 | + | |
| 31 | +import os | |
| 32 | +import sys | |
| 33 | +import time | |
| 34 | +import signal | |
| 35 | +import traceback | |
| 36 | +import logging | |
| 37 | + | |
| 38 | +from pysilhouette import PROCERROR, PROCSUCCESS | |
| 39 | +from pysilhouette.er import ER | |
| 40 | +from pysilhouette.db import create_database, Database | |
| 41 | +from pysilhouette.log import reload_conf | |
| 42 | +from pysilhouette.prep import readconf, getopts, chkopts, parse_conf | |
| 43 | + | |
| 44 | +ENTITYS = ('asynperformer',) | |
| 45 | + | |
| 46 | +class AsynScheduler(ER): | |
| 47 | + """AsynScheduler Class | |
| 48 | + """ | |
| 49 | + def __init__(self, opts, cf): | |
| 50 | + ER.__init__(self, opts, cf) | |
| 51 | + for entity in ENTITYS: | |
| 52 | + self._fifo(entity) | |
| 53 | + | |
| 54 | + self._setdaemon() | |
| 55 | + create_database(cf) | |
| 56 | + | |
| 57 | + def process(self): | |
| 58 | + self.logger.info('asynscheduler : [started]') | |
| 59 | + while True: | |
| 60 | + try: | |
| 61 | + for entity in ENTITYS: | |
| 62 | + fp = open(self.cf["%s.mkfifo.path" % entity], 'w') | |
| 63 | + try: | |
| 64 | + fp.write(str(self.cf['%s.mkfifo.start.code' % entity])) | |
| 65 | + self.logger.info('Start code was written. - file=%s : code=%s' | |
| 66 | + % (self.cf["%s.mkfifo.path" % entity], self.cf['%s.mkfifo.start.code' % entity])) | |
| 67 | + | |
| 68 | + finally: | |
| 69 | + fp.close() | |
| 70 | + | |
| 71 | + self.logger.debug('interval start, interval=%s' % (self.cf['asynscheduler.interval'])) | |
| 72 | + time.sleep(self.cf['asynscheduler.interval']) | |
| 73 | + | |
| 74 | + except IOError, i: | |
| 75 | + if i.errno == 4: | |
| 76 | + return PROCSUCCESS # When ending with the signal | |
| 77 | + | |
| 78 | + return PROCERROR # beyond expectation | |
| 79 | + | |
def sigterm_handler(signum, frame):
    """Log receipt of SIGTERM; shutdown itself happens when the main loop's
    blocking FIFO I/O is interrupted.

    @param signum: signal number delivered by the kernel
    @param frame: current stack frame (unused)
    """
    logging.getLogger('pysilhouette.asynscheduler.signal').warning(
        'Stop the asynschedulerd with signal- pid=%s, signal=%s' % (os.getpid(), signum))
| 83 | + | |
def main():
    """Entry point for the asynscheduler daemon.

    Parses options, loads configuration and logging setup, installs the
    SIGTERM handler and runs the AsynScheduler loop.

    @return: process exit status (PROCSUCCESS on clean stop, PROCERROR on
             any configuration or runtime failure)
    """
    (opts, args) = getopts()
    if chkopts(opts) is True:
        return PROCERROR

    cf = readconf(opts.config)
    if cf is None:
        print >>sys.stderr, 'Failed to load the config file "%s". (%s)' % (opts.config, sys.argv[0])
        return PROCERROR

    # conf parse
    # NOTE(review): unlike the asynperformer main(), this does not export
    # PYSILHOUETTE_CONF to the environment -- confirm that is intentional.
    if parse_conf(cf) is False:
        return PROCERROR

    if reload_conf(cf["env.sys.log.conf.path"]):
        logger = logging.getLogger('pysilhouette.asynscheduler')
    else:
        print >>sys.stderr, 'Failed to load the log file. (%s)' % sys.argv[0]
        return PROCERROR

    try:
        try:
            signal.signal(signal.SIGTERM, sigterm_handler)
            asynscheduler = AsynScheduler(opts, cf)
            ret = asynscheduler.process() # start!!
            return ret
        except KeyboardInterrupt, k:
            logger.critical('Keyboard interrupt occurred. - %s' % str(k.args))
            print >>sys.stderr, 'Keyboard interrupt occurred. - %s' % str(k.args)
        except Exception, e:
            logger.critical('A system error has occurred. - %s' % str(e.args))
            print >>sys.stderr, 'A system error has occurred. - %s' % str(e.args)
            print >>sys.stderr, traceback.format_exc()
            t_logger = logging.getLogger('pysilhouette_traceback')
            t_logger.critical(traceback.format_exc())

    finally:
        # Remove the pidfile on the way out when running as a daemon.
        if opts.daemon is True and os.path.isfile(opts.pidfile):
            os.unlink(opts.pidfile)
            logger.warning('Process file has been deleted.. - pidfile=%s' % opts.pidfile)

    # Reached only when one of the exception handlers above fired.
    return PROCERROR

if __name__ == '__main__':
    sys.exit(main())
| @@ -3,7 +3,7 @@ | ||
| 3 | 3 | # |
| 4 | 4 | # This file is part of Pysilhouette. |
| 5 | 5 | # |
| 6 | -# Copyright (c) 2009 HDE, Inc. | |
| 6 | +# Copyright (c) 2009-2010 HDE, Inc. | |
| 7 | 7 | # |
| 8 | 8 | # Permission is hereby granted, free of charge, to any person obtaining a copy |
| 9 | 9 | # of this software and associated documentation files (the "Software"), to deal |
| @@ -36,8 +36,8 @@ import subprocess | ||
| 36 | 36 | import signal |
| 37 | 37 | import logging |
| 38 | 38 | |
| 39 | -import pysilhouette | |
| 40 | 39 | import pysilhouette.log |
| 40 | +from pysilhouette import PROCERROR | |
| 41 | 41 | from pysilhouette.util import astrftime |
| 42 | 42 | from pysilhouette.util import kill_proc |
| 43 | 43 |
| @@ -52,11 +52,11 @@ def observer(opts, cf): | ||
| 52 | 52 | """ |
| 53 | 53 | def scheduler(): |
| 54 | 54 | cmd = [cf['observer.target.python'], cf['observer.target.scheduler']] |
| 55 | - if cmd_args: | |
| 55 | + if cmd_args: | |
| 56 | 56 | cmd.extend(cmd_args) |
| 57 | 57 | if opts.daemon is True: |
| 58 | 58 | cmd.extend(['-p', os.path.abspath(os.path.dirname(opts.pidfile)) + '/schedulerd.pid']) |
| 59 | - | |
| 59 | + | |
| 60 | 60 | logger.debug('scheduler:popen - cmd=%s' % cmd) |
| 61 | 61 | return subprocess.Popen(args=cmd, |
| 62 | 62 | close_fds=True, |
| @@ -65,7 +65,7 @@ def observer(opts, cf): | ||
| 65 | 65 | |
| 66 | 66 | def performer(): |
| 67 | 67 | cmd = [cf['observer.target.python'], cf['observer.target.performer']] |
| 68 | - if cmd_args: | |
| 68 | + if cmd_args: | |
| 69 | 69 | cmd.extend(cmd_args) |
| 70 | 70 | if opts.daemon is True: |
| 71 | 71 | cmd.extend(['-p', os.path.abspath(os.path.dirname(opts.pidfile)) + '/performerd.pid']) |
| @@ -76,6 +76,32 @@ def observer(opts, cf): | ||
| 76 | 76 | env=this_env, |
| 77 | 77 | shell=False) |
| 78 | 78 | |
| 79 | + def asynscheduler(): | |
| 80 | + cmd = [cf['observer.target.python'], cf['observer.target.asynscheduler']] | |
| 81 | + if cmd_args: | |
| 82 | + cmd.extend(cmd_args) | |
| 83 | + if opts.daemon is True: | |
| 84 | + cmd.extend(['-p', os.path.abspath(os.path.dirname(opts.pidfile)) + '/asynschedulerd.pid']) | |
| 85 | + | |
| 86 | + logger.debug('asynscheduler:popen - cmd=%s' % cmd) | |
| 87 | + return subprocess.Popen(args=cmd, | |
| 88 | + close_fds=True, | |
| 89 | + env=this_env, | |
| 90 | + shell=False) | |
| 91 | + | |
| 92 | + def asynperformer(): | |
| 93 | + cmd = [cf['observer.target.python'], cf['observer.target.asynperformer']] | |
| 94 | + if cmd_args: | |
| 95 | + cmd.extend(cmd_args) | |
| 96 | + if opts.daemon is True: | |
| 97 | + cmd.extend(['-p', os.path.abspath(os.path.dirname(opts.pidfile)) + '/asynperformerd.pid']) | |
| 98 | + | |
| 99 | + logger.debug('asynperformer:popen - cmd=%s' % cmd) | |
| 100 | + return subprocess.Popen(args=cmd, | |
| 101 | + close_fds=True, | |
| 102 | + env=this_env, | |
| 103 | + shell=False) | |
| 104 | + | |
| 79 | 105 | def status(count, status, default, force=False): |
| 80 | 106 | try: |
| 81 | 107 | if (force is True) or (status != count): |
| @@ -107,7 +133,7 @@ def observer(opts, cf): | ||
| 107 | 133 | |
| 108 | 134 | spoint = time.time() |
| 109 | 135 | |
| 110 | - default_count = int(cf['observer.restart.count']) # default | |
| 136 | + default_count = cf['observer.restart.count'] # default | |
| 111 | 137 | status_count = default_count # status |
| 112 | 138 | count = default_count # now |
| 113 | 139 |
| @@ -120,10 +146,20 @@ def observer(opts, cf): | ||
| 120 | 146 | logger.info('scheduler : [start] - pid=%s, count=%s/%s' |
| 121 | 147 | % (sd.pid, count, cf['observer.restart.count'])) |
| 122 | 148 | |
| 149 | + asynpf = asynperformer() # start!! | |
| 150 | + logger.info('asynperformer : [start] - pid=%s, count=%s/%s' | |
| 151 | + % (pf.pid, count, cf['observer.restart.count'])) | |
| 152 | + | |
| 153 | + asynsd = asynscheduler() # start!! | |
| 154 | + logger.info('asynscheduler : [start] - pid=%s, count=%s/%s' | |
| 155 | + % (sd.pid, count, cf['observer.restart.count'])) | |
| 156 | + | |
| 123 | 157 | status(count, status_count, default_count, True) |
| 124 | 158 | |
| 125 | 159 | try: |
| 126 | 160 | while True: |
| 161 | + simple_log = [] | |
| 162 | + # Performer | |
| 127 | 163 | if not pf.poll() is None: |
| 128 | 164 | logger.debug('return code=%d' % pf.returncode) |
| 129 | 165 | logger.info('performer : [stop] - pid=%s, count=%s/%s' |
| @@ -133,9 +169,11 @@ def observer(opts, cf): | ||
| 133 | 169 | logger.info('performer : [start] - pid=%s, count=%s/%s' |
| 134 | 170 | % (pf.pid, count, cf['observer.restart.count'])) |
| 135 | 171 | else: |
| 136 | - logger.info('performer [running] - pid=%s, count=%s/%s' | |
| 172 | + simple_log.append('performer (running) - count=%s/%s' % (count, cf['observer.restart.count'])) | |
| 173 | + logger.debug('performer [running] - pid=%s, count=%s/%s' | |
| 137 | 174 | % (pf.pid, count, cf['observer.restart.count'])) |
| 138 | - | |
| 175 | + | |
| 176 | + # Scheduler | |
| 139 | 177 | if not sd.poll() is None: |
| 140 | 178 | logger.debug('return code=%d' % sd.returncode) |
| 141 | 179 | logger.info('scheduler : [stop] - pid=%s, count=%s/%s' |
| @@ -145,20 +183,50 @@ def observer(opts, cf): | ||
| 145 | 183 | logger.info('scheduler : [start] - pid=%s, count=%s/%s' |
| 146 | 184 | % (sd.pid, count, cf['observer.restart.count'])) |
| 147 | 185 | else: |
| 148 | - logger.info('scheduler [running] - pid=%s, count=%s/%s' | |
| 186 | + simple_log.append('scheduler (running) - count=%s/%s' % (count, cf['observer.restart.count'])) | |
| 187 | + logger.debug('scheduler [running] - pid=%s, count=%s/%s' | |
| 149 | 188 | % (sd.pid, count, cf['observer.restart.count'])) |
| 150 | - | |
| 189 | + | |
| 190 | + # AsynPerformer | |
| 191 | + if not asynpf.poll() is None: | |
| 192 | + logger.debug('return code=%d' % asynpf.returncode) | |
| 193 | + logger.info('asynperformer : [stop] - pid=%s, count=%s/%s' | |
| 194 | + % (asynpf.pid, count, cf['observer.restart.count'])) | |
| 195 | + asynpf = asynperformer() # restart | |
| 196 | + count -= 1 | |
| 197 | + logger.info('asynperformer : [start] - pid=%s, count=%s/%s' | |
| 198 | + % (asynpf.pid, count, cf['observer.restart.count'])) | |
| 199 | + else: | |
| 200 | + simple_log.append('asynperformer (running) - count=%s/%s' % (count, cf['observer.restart.count'])) | |
| 201 | + logger.debug('asynperformer [running] - pid=%s, count=%s/%s' | |
| 202 | + % (asynpf.pid, count, cf['observer.restart.count'])) | |
| 203 | + | |
| 204 | + # AsynScheduler | |
| 205 | + if not asynsd.poll() is None: | |
| 206 | + logger.debug('return code=%d' % asynsd.returncode) | |
| 207 | + logger.info('asynscheduler : [stop] - pid=%s, count=%s/%s' | |
| 208 | + % (asynsd.pid, count, cf['observer.restart.count'])) | |
| 209 | + asynsd = asynscheduler() # restart | |
| 210 | + count -= 1 | |
| 211 | + logger.info('asynscheduler : [start] - pid=%s, count=%s/%s' | |
| 212 | + % (asynsd.pid, count, cf['observer.restart.count'])) | |
| 213 | + else: | |
| 214 | + simple_log.append('asynscheduler (running) - count=%s/%s' % ( count, cf['observer.restart.count'])) | |
| 215 | + logger.debug('asynscheduler [running] - pid=%s, count=%s/%s' | |
| 216 | + % (asynsd.pid, count, cf['observer.restart.count'])) | |
| 217 | + | |
| 218 | + logger.info(str(simple_log)[1:-1]) | |
| 219 | + | |
| 151 | 220 | # status output |
| 152 | 221 | status(count, status_count, default_count, False) |
| 153 | 222 | |
| 154 | - if ( 0 < int(cf['observer.restart.count.clear.time']) ) and (count <= 0): | |
| 223 | + if ( 0 < cf['observer.restart.count.clear.time'] ) and (count <= 0): | |
| 155 | 224 | epoint = time.time() |
| 156 | 225 | interval = int(math.ceil(epoint) - math.floor(spoint)) |
| 157 | - | |
| 158 | - logger.error('observer restart count reached the value specified in config. Checking interval time. observer.restart.count=%d interval=%d/%s' | |
| 226 | + logger.error('observer restart count reached the value specified in config. Checking interval time. observer.restart.count=%s interval=%d/%s' | |
| 159 | 227 | % (cf['observer.restart.count'], interval, cf['observer.restart.count.clear.time'])) |
| 160 | 228 | |
| 161 | - if interval < int(cf['observer.restart.count.clear.time']): | |
| 229 | + if interval < cf['observer.restart.count.clear.time']: | |
| 162 | 230 | # Failed 'observer.restart.count' times in 'observer.restart.count.clear.time' seconds. |
| 163 | 231 | logger.error('observer restarted %s times in count.clear.time seconds interval. Recognizing as failure. Exiting.' |
| 164 | 232 | % cf['observer.restart.count']) |
| @@ -167,29 +235,42 @@ def observer(opts, cf): | ||
| 167 | 235 | # Failed 'observer.restart.count' times in an interval longer than |
| 168 | 236 | # 'observer.restart.count.clear.time' seconds. Clearing counter. |
| 169 | 237 | spoint = time.time() |
| 170 | - count = int(cf['observer.restart.count']) | |
| 238 | + count = cf['observer.restart.count'] | |
| 171 | 239 | logger.info('observer restarted %s times, but in not short time. Clearing count. start time %s' |
| 172 | 240 | % (cf['observer.restart.count'], astrftime(spoint))) |
| 173 | 241 | |
| 174 | - time.sleep(int(cf['observer.check.interval'])) | |
| 242 | + time.sleep(cf['observer.check.interval']) | |
| 175 | 243 | |
| 176 | 244 | # -- end while |
| 177 | 245 | |
| 178 | 246 | finally: |
| 179 | 247 | # destroy |
| 248 | + # scheduler | |
| 180 | 249 | if not sd is None: |
| 181 | 250 | if kill_proc(sd) is True: |
| 182 | 251 | logger.info('KILL %d: killing scheduler succeeded.' % sd.pid) |
| 183 | 252 | else: |
| 184 | 253 | logger.info('KILL %d: killing scheduler failed.' % sd.pid) |
| 185 | - | |
| 254 | + # performer | |
| 186 | 255 | if not pf is None: |
| 187 | 256 | if kill_proc(pf) is True: |
| 188 | 257 | logger.info('KILL %d: killing performer succeeded.' % pf.pid) |
| 189 | 258 | else: |
| 190 | 259 | logger.info('KILL %d: killing performer failed.' % pf.pid) |
| 260 | + # asynscheduler | |
| 261 | + if not asynsd is None: | |
| 262 | + if kill_proc(asynsd) is True: | |
| 263 | + logger.info('KILL %d: killing asynscheduler succeeded.' % asynsd.pid) | |
| 264 | + else: | |
| 265 | + logger.info('KILL %d: killing asynscheduler failed.' % asynsd.pid) | |
| 266 | + | |
| 267 | + if not asynpf is None: | |
| 268 | + if kill_proc(asynpf) is True: | |
| 269 | + logger.info('KILL %d: killing asynperformer succeeded.' % asynpf.pid) | |
| 270 | + else: | |
| 271 | + logger.info('KILL %d: killing asynperformer failed.' % asynpf.pid) | |
| 191 | 272 | |
| 192 | - return 1 | |
| 273 | + return PROCERROR | |
| 193 | 274 | |
| 194 | 275 | # -- daemon |
| 195 | 276 | def daemonize(stdin, stdout, stderr, pidfile): |
| @@ -3,7 +3,7 @@ | ||
| 3 | 3 | # |
| 4 | 4 | # This file is part of Pysilhouette. |
| 5 | 5 | # |
| 6 | -# Copyright (c) 2009 HDE, Inc. | |
| 6 | +# Copyright (c) 2009-2010 HDE, Inc. | |
| 7 | 7 | # |
| 8 | 8 | # Permission is hereby granted, free of charge, to any person obtaining a copy |
| 9 | 9 | # of this software and associated documentation files (the "Software"), to deal |
| @@ -34,15 +34,47 @@ from sqlalchemy import create_engine, MetaData | ||
| 34 | 34 | from sqlalchemy.orm import sessionmaker, mapper, \ |
| 35 | 35 | clear_mappers, relation, scoped_session |
| 36 | 36 | from sqlalchemy.orm.exc import UnmappedInstanceError |
| 37 | +from sqlalchemy.pool import SingletonThreadPool, QueuePool | |
| 37 | 38 | |
| 38 | 39 | from pysilhouette.db.model import reload_mappers |
| 39 | 40 | from pysilhouette import SilhouetteException |
| 40 | 41 | |
| 42 | + | |
| 43 | +from sqlalchemy.orm import sessionmaker, mapper, SessionExtension, scoped_session | |
| 44 | + | |
| 41 | 45 | class SilhouetteDBException(SilhouetteException): |
| 42 | 46 | """Database running error. |
| 43 | 47 | """ |
| 44 | 48 | pass |
| 45 | 49 | |
def create_database(cf):
    """Build a Database object from the 'database.*' configuration keys.

    SQLite URLs get a plain engine (no explicit pool class); other backends
    use QueuePool when database.pool.status is 1, SingletonThreadPool
    otherwise. Mappers are (re)loaded against the new engine's metadata.

    @param cf: parsed configuration dictionary
    @return: initialized Database instance
    @raise SilhouetteDBException: if the Database could not be created
    """
    db = None
    # IDIOM: startswith() replaces the old slice-and-strip prefix test
    # (cf['database.url'][:6].strip() == 'sqlite').
    if cf['database.url'].startswith('sqlite'):
        db = Database(cf['database.url'],
                      encoding="utf-8",
                      convert_unicode=True,
                      )
    else:
        if cf['database.pool.status'] == 1:
            db = Database(cf['database.url'],
                          encoding="utf-8",
                          convert_unicode=True,
                          poolclass=QueuePool,
                          pool_size=cf['database.pool.size'],
                          max_overflow=cf['database.pool.max.overflow'],
                          )
        else:
            db = Database(cf['database.url'],
                          encoding="utf-8",
                          convert_unicode=True,
                          poolclass=SingletonThreadPool,
                          pool_size=cf['database.pool.size'],
                          )
    if db is None:
        raise SilhouetteDBException('Initializing a database error - "Database" failed to create.')
    reload_mappers(db.get_metadata())
    return db
| 77 | + | |
| 46 | 78 | class Database: |
| 47 | 79 | """TODO |
| 48 | 80 | """ |
| @@ -69,9 +101,12 @@ class Database: | ||
| 69 | 101 | return self.__metadata |
| 70 | 102 | |
| 71 | 103 | def get_session(self): |
| 72 | - if self.__Session is None: | |
| 73 | - self.__Session = sessionmaker(bind=self.__engine) | |
| 74 | - return self.__Session() | |
| 104 | + #if self.__Session is None: | |
| 105 | + # self.__Session = sessionmaker(bind=self.__engine) | |
| 106 | + #return self.__Session() | |
| 107 | + return scoped_session( | |
| 108 | + sessionmaker(bind=self.__engine, autoflush=True)) | |
| 109 | + | |
| 75 | 110 | |
| 76 | 111 | |
| 77 | 112 | def dbsave(func): |
| @@ -115,7 +150,7 @@ def dbupdate(func): | ||
| 115 | 150 | logger = logging.getLogger('pysilhouette.db') |
| 116 | 151 | session = args[0] |
| 117 | 152 | model = args[1] |
| 118 | - model_name = repr(model).split("<")[0] | |
| 153 | + model_name = repr(model).split("<")[-1] | |
| 119 | 154 | model_id = model.id |
| 120 | 155 | try: |
| 121 | 156 | func(*args, **kwargs) |
| @@ -3,7 +3,7 @@ | ||
| 3 | 3 | # |
| 4 | 4 | # This file is part of Pysilhouette. |
| 5 | 5 | # |
| 6 | -# Copyright (c) 2009 HDE, Inc. | |
| 6 | +# Copyright (c) 2009-2010 HDE, Inc. | |
| 7 | 7 | # |
| 8 | 8 | # Permission is hereby granted, free of charge, to any person obtaining a copy |
| 9 | 9 | # of this software and associated documentation files (the "Software"), to deal |
| @@ -50,6 +50,16 @@ def jobgroup_findbystatus(session, status=JOBGROUP_STATUS['PEND']): | ||
| 50 | 50 | return session.query(JobGroup).filter( |
| 51 | 51 | JobGroup.status == status).order_by(JobGroup.id.asc()).all() |
| 52 | 52 | |
| 53 | +def jobgroup_findbytype_status(session, type, status=JOBGROUP_STATUS['PEND']): | |
| 54 | + return session.query(JobGroup).filter( | |
| 55 | + JobGroup.type == type).filter( | |
| 56 | + JobGroup.status == status).order_by(JobGroup.id.asc()).all() | |
| 57 | + | |
| 58 | +def jobgroup_findbytype_limit_status(session, type, limit, status=JOBGROUP_STATUS['PEND']): | |
| 59 | + return session.query(JobGroup).filter( | |
| 60 | + JobGroup.type == type).filter( | |
| 61 | + JobGroup.status == status).order_by(JobGroup.id.asc()).limit(limit).all() | |
| 62 | + | |
| 53 | 63 | def jobgroup_findbyuniqkey(session, uniq_key): |
| 54 | 64 | if uniq_key: |
| 55 | 65 | return session.query(JobGroup).filter( |
| @@ -100,7 +110,7 @@ def job_update(session, m_job, status=None, autocommit=True): | ||
| 100 | 110 | m_job.status = status |
| 101 | 111 | |
| 102 | 112 | ret = update(session, m_job) |
| 103 | - | |
| 113 | + | |
| 104 | 114 | if autocommit is True: |
| 105 | 115 | session.commit() |
| 106 | 116 | return ret |
| @@ -111,7 +121,7 @@ def job_result_action(session, job, info, autocommit=True): | ||
| 111 | 121 | job.action_stderr = info['stderr'] |
| 112 | 122 | |
| 113 | 123 | ret = job_update(session, job) |
| 114 | - | |
| 124 | + | |
| 115 | 125 | if autocommit is True: |
| 116 | 126 | session.commit() |
| 117 | 127 |
| @@ -3,7 +3,7 @@ | ||
| 3 | 3 | # |
| 4 | 4 | # This file is part of Pysilhouette. |
| 5 | 5 | # |
| 6 | -# Copyright (c) 2009 HDE, Inc. | |
| 6 | +# Copyright (c) 2009-2010 HDE, Inc. | |
| 7 | 7 | # |
| 8 | 8 | # Permission is hereby granted, free of charge, to any person obtaining a copy |
| 9 | 9 | # of this software and associated documentation files (the "Software"), to deal |
| @@ -71,14 +71,22 @@ JOBGROUP_STATUS = { | ||
| 71 | 71 | 'APPERR' : _RES_APP_ERROR, |
| 72 | 72 | } |
| 73 | 73 | |
| 74 | +#: Jobgroup type | |
| 75 | +JOBGROUP_TYPE = { | |
| 76 | + 'SERIAL': 0, #Serial | |
| 77 | + 'PARALLEL' : 1, #Parallel | |
| 78 | + } | |
| 79 | + | |
| 74 | 80 | #: Jobgroup Table instance. |
| 75 | 81 | def get_jobgroup_table(metadata, now): |
| 76 | 82 | return sqlalchemy.Table('jobgroup', metadata, |
| 77 | 83 | sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True, |
| 78 | 84 | autoincrement=True), |
| 79 | - sqlalchemy.Column('name', sqlalchemy.String(32), nullable=False), | |
| 85 | + sqlalchemy.Column('name', sqlalchemy.String(512), nullable=False), | |
| 80 | 86 | sqlalchemy.Column('uniq_key', sqlalchemy.Unicode(36), nullable=False), |
| 81 | 87 | sqlalchemy.Column('finish_command', sqlalchemy.String(1024)), |
| 88 | + sqlalchemy.Column('type', sqlalchemy.Integer(1), nullable=False, | |
| 89 | + default=JOBGROUP_TYPE['SERIAL']), | |
| 82 | 90 | sqlalchemy.Column('status', sqlalchemy.Unicode(3), nullable=False, |
| 83 | 91 | default=JOBGROUP_STATUS['PEND']), |
| 84 | 92 | sqlalchemy.Column('register', sqlalchemy.String(32), nullable=True), |
| @@ -159,9 +167,10 @@ class JobGroup(Model): | ||
| 159 | 167 | """JobGroup Table class. |
| 160 | 168 | """ |
| 161 | 169 | |
| 162 | - def __init__(self, name, uniq_key): | |
| 170 | + def __init__(self, name, uniq_key, type=JOBGROUP_TYPE['SERIAL']): | |
| 163 | 171 | self.name = name |
| 164 | 172 | self.uniq_key = uniq_key |
| 173 | + self.type = type | |
| 165 | 174 | |
| 166 | 175 | def __repr__(self): |
| 167 | 176 | return "JobGroup<'%s','%s'>" % (self.name, self.uniq_key) |
| @@ -169,10 +178,6 @@ class JobGroup(Model): | ||
| 169 | 178 | class Job(Model): |
| 170 | 179 | """Job Table class. |
| 171 | 180 | """ |
| 172 | - | |
| 173 | - #: Maximum number of characters to stdout. | |
| 174 | - STD_OUTPUT_LIMIT = 4096 | |
| 175 | - | |
| 176 | 181 | def __init__(self, name, order, action_command): |
| 177 | 182 | self.name = name |
| 178 | 183 | self.order = order |
| @@ -0,0 +1,79 @@ | ||
| 1 | +#!/usr/bin/env python | |
| 2 | +# -*- coding: utf-8 -*- | |
| 3 | +# | |
| 4 | +# This file is part of Pysilhouette. | |
| 5 | +# | |
| 6 | +# Copyright (c) 2009-2010 HDE, Inc. | |
| 7 | +# | |
| 8 | +# Permission is hereby granted, free of charge, to any person obtaining a copy | |
| 9 | +# of this software and associated documentation files (the "Software"), to deal | |
| 10 | +# in the Software without restriction, including without limitation the rights | |
| 11 | +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | |
| 12 | +# copies of the Software, and to permit persons to whom the Software is | |
| 13 | +# furnished to do so, subject to the following conditions: | |
| 14 | +# | |
| 15 | +# The above copyright notice and this permission notice shall be included in | |
| 16 | +# all copies or substantial portions of the Software. | |
| 17 | +# | |
| 18 | +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
| 19 | +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
| 20 | +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | |
| 21 | +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |
| 22 | +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | |
| 23 | +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN | |
| 24 | +# THE SOFTWARE. | |
| 25 | +# | |
| 26 | + | |
| 27 | +""" | |
| 28 | +@author: Kei Funagayama <kei@karesansui-project.info> | |
| 29 | +""" | |
| 30 | + | |
| 31 | +import logging | |
| 32 | +import os | |
| 33 | + | |
| 34 | +from pysilhouette.util import kill_proc, write_pidfile, create_fifo | |
| 35 | + | |
| 36 | +class ER: | |
| 37 | + """ER Class | |
| 38 | + """ | |
| 39 | + | |
| 40 | + def __init__(self, opts, cf): | |
| 41 | + self.logger = logging.getLogger('pysilhouette.%s' % self.__class__.__name__.lower()) | |
| 42 | + #self.entity = self.__class__.__name__.lower() | |
| 43 | + self.opts = opts | |
| 44 | + self.cf = cf | |
| 45 | + | |
| 46 | + def _fifo(self, prefix): | |
| 47 | + if os.access(self.cf["%s.mkfifo.path" % prefix], os.F_OK|os.R_OK|os.W_OK) is False: | |
| 48 | + try: | |
| 49 | + os.unlink(self.cf["%s.mkfifo.path" % prefix]) | |
| 50 | + self.logger.info('Deleted filo file. - file=%s' \ | |
| 51 | + % self.cf["%s.mkfifo.path" % prefix]) | |
| 52 | + except: | |
| 53 | + pass # Not anything | |
| 54 | + | |
| 55 | + try: | |
| 56 | + create_fifo(self.cf["%s.mkfifo.path" % prefix], | |
| 57 | + self.cf["%s.mkfifo.user.name" % prefix], | |
| 58 | + self.cf["%s.mkfifo.group.name" % prefix], | |
| 59 | + self.cf["%s.mkfifo.perms" % prefix], | |
| 60 | + ) | |
| 61 | + except OSError, oe: | |
| 62 | + self.logger.error('Failed to create a fifo file.') | |
| 63 | + raise oe | |
| 64 | + | |
| 65 | + self.logger.info('The fifo file was created. - file=%s' \ | |
| 66 | + % self.cf["%s.mkfifo.path" % prefix]) | |
| 67 | + return True | |
| 68 | + | |
| 69 | + def _setdaemon(self): | |
| 70 | + if self.opts.daemon is True: | |
| 71 | + pid = os.getpid() | |
| 72 | + try: | |
| 73 | + write_pidfile(self.opts.pidfile, pid) | |
| 74 | + except Exception: | |
| 75 | + self.logger.error('Could not create process file. - file=%s' % self.opts.pidfile) | |
| 76 | + raise e | |
| 77 | + | |
| 78 | + self.logger.info('The process file was created. - file=%s' % self.opts.pidfile) | |
| 79 | + return True |
| @@ -3,7 +3,7 @@ | ||
| 3 | 3 | # |
| 4 | 4 | # This file is part of Pysilhouette. |
| 5 | 5 | # |
| 6 | -# Copyright (c) 2009 HDE, Inc. | |
| 6 | +# Copyright (c) 2009-2010 HDE, Inc. | |
| 7 | 7 | # |
| 8 | 8 | # Permission is hereby granted, free of charge, to any person obtaining a copy |
| 9 | 9 | # of this software and associated documentation files (the "Software"), to deal |
| @@ -3,7 +3,7 @@ | ||
| 3 | 3 | # |
| 4 | 4 | # This file is part of Pysilhouette. |
| 5 | 5 | # |
| 6 | -# Copyright (c) 2009 HDE, Inc. | |
| 6 | +# Copyright (c) 2009-2010 HDE, Inc. | |
| 7 | 7 | # |
| 8 | 8 | # Permission is hereby granted, free of charge, to any person obtaining a copy |
| 9 | 9 | # of this software and associated documentation files (the "Software"), to deal |
| @@ -34,149 +34,89 @@ import os | ||
| 34 | 34 | import traceback |
| 35 | 35 | import logging |
| 36 | 36 | |
| 37 | -from sqlalchemy.pool import SingletonThreadPool, QueuePool | |
| 37 | +from pysilhouette import PROCERROR, PROCSUCCESS | |
| 38 | +from pysilhouette.er import ER | |
| 38 | 39 | from pysilhouette.log import reload_conf |
| 39 | -from pysilhouette.prep import readconf, getopts, chkopts | |
| 40 | -from pysilhouette.db import Database | |
| 41 | -from pysilhouette.db.model import reload_mappers, JOBGROUP_STATUS | |
| 42 | -from pysilhouette.db.access import jobgroup_findbystatus, jobgroup_update | |
| 40 | +from pysilhouette.prep import readconf, getopts, chkopts, parse_conf | |
| 41 | +from pysilhouette.db import create_database, Database | |
| 42 | +from pysilhouette.db.model import JOBGROUP_STATUS, JOBGROUP_TYPE | |
| 43 | +from pysilhouette.db.access import jobgroup_findbytype_status, jobgroup_update | |
| 43 | 44 | from pysilhouette.worker import SimpleWorker |
| 44 | 45 | |
| 45 | -from pysilhouette.util import kill_proc, write_pidfile, create_fifo | |
| 46 | - | |
| 47 | -def sigterm_handler(signum, frame): | |
| 48 | - logger = logging.getLogger('pysilhouette.performer.signal') | |
| 49 | - logger.info('Stop the performerd with signal - pid=%s, signal=%s' % (os.getpid(), signum)) | |
| 50 | - | |
| 51 | -def performer(opts, cf): | |
| 52 | - logger = logging.getLogger('pysilhouette.performer') | |
| 53 | - | |
| 54 | - # Initialization | |
| 55 | - if os.access(cf["observer.mkfifo.path"], os.F_OK|os.R_OK|os.W_OK) is False: | |
| 56 | - try: | |
| 57 | - os.unlink(cf["observer.mkfifo.path"]) | |
| 58 | - logger.info('Deleted filo file. - file=%s' % cf["observer.mkfifo.path"]) | |
| 59 | - except: | |
| 60 | - pass # Not anything | |
| 61 | - | |
| 62 | - create_fifo(cf["observer.mkfifo.path"], | |
| 63 | - cf["observer.mkfifo.user.name"], | |
| 64 | - cf["observer.mkfifo.group.name"], | |
| 65 | - cf["observer.mkfifo.perms"], | |
| 66 | - ) | |
| 67 | - | |
| 68 | - logger.info('The fifo file was created. - file=%s' % cf["observer.mkfifo.path"]) | |
| 69 | - | |
| 70 | - | |
| 71 | - if opts.daemon is True: | |
| 72 | - pid = os.getpid() | |
| 73 | - if write_pidfile(opts.pidfile, pid): | |
| 74 | - logger.info('The process file was created. - file=%s' % opts.pidfile) | |
| 75 | - else: | |
| 76 | - logger.error('Could not create process file. - file=%s' % opts.pidfile) | |
| 77 | - return 1 | |
| 78 | - | |
| 79 | - logger.info('performer : [started]') | |
| 80 | - | |
| 81 | - try: | |
| 82 | - if cf['database.url'][:6].strip() == 'sqlite': | |
| 83 | - db = Database(cf['database.url'], | |
| 84 | - encoding="utf-8", | |
| 85 | - convert_unicode=True, | |
| 86 | - #assert_unicode='warn', # DEBUG | |
| 87 | - #echo = opts.verbose, | |
| 88 | - #echo_pool = opts.verbose, | |
| 89 | - echo=True, # TODO | |
| 90 | - echo_pool=True # TODO | |
| 91 | - ) | |
| 92 | - else: | |
| 93 | - if int(cf['database.pool.status']) == 1: | |
| 94 | - db = Database(cf['database.url'], | |
| 95 | - encoding="utf-8", | |
| 96 | - convert_unicode=True, | |
| 97 | - #assert_unicode='warn', # DEBUG | |
| 98 | - poolclass=QueuePool, | |
| 99 | - pool_size=int(cf['database.pool.size']), | |
| 100 | - max_overflow=int(cf['database.pool.max.overflow']), | |
| 101 | - #echo = opts.verbose, | |
| 102 | - #echo_pool = opts.verbose, | |
| 103 | - echo=True, # TODO | |
| 104 | - echo_pool=True # TODO | |
| 105 | - ) | |
| 106 | - else: | |
| 107 | - db = Database(cf['database.url'], | |
| 108 | - encoding="utf-8", | |
| 109 | - convert_unicode=True, | |
| 110 | - #assert_unicode='warn', # DEBUG | |
| 111 | - poolclass=SingletonThreadPool, | |
| 112 | - #echo = opts.verbose, | |
| 113 | - #echo_pool = opts.verbose, | |
| 114 | - echo=True, # TODO | |
| 115 | - echo_pool=True # TODO | |
| 116 | - ) | |
| 117 | - | |
| 118 | - reload_mappers(db.get_metadata()) | |
| 119 | - | |
| 120 | - except Exception, e: | |
| 121 | - logger.error('Initializing a database error - %s' % str(e.args)) | |
| 122 | - t_logger = logging.getLogger('pysilhouette_traceback') | |
| 123 | - t_logger.error(traceback.format_exc()) | |
| 124 | - return 1 | |
| 125 | - | |
| 126 | - while True: | |
| 127 | - fp = open(cf["observer.mkfifo.path"], 'r') | |
| 128 | - try: | |
| 129 | - code = fp.read() | |
| 130 | - finally: | |
| 131 | - fp.close() | |
| 46 | +class Performer(ER): | |
| 47 | + """Performer Class | |
| 48 | + """ | |
| 49 | + def __init__(self, opts, cf): | |
| 50 | + ER.__init__(self, opts, cf) | |
| 51 | + self._fifo('performer') | |
| 52 | + self._setdaemon() | |
| 53 | + self.db = create_database(self.cf) | |
| 54 | + | |
| 55 | + def process(self): | |
| 56 | + self.logger.info('performer : [started]') | |
| 57 | + while True: | |
| 58 | + fp = open(self.cf["performer.mkfifo.path"], 'r') | |
| 59 | + try: | |
| 60 | + code = fp.read() | |
| 61 | + finally: | |
| 62 | + fp.close() | |
| 132 | 63 | |
| 133 | - logger.info('Received code from the FIFO file. - code=%s' % code) | |
| 134 | - session = db.get_session() | |
| 135 | - m_jgs = jobgroup_findbystatus(session) | |
| 136 | - session.close() | |
| 137 | - | |
| 138 | - logger.info('Queued the Job Group from the database. - Number of JobGroup=%d' % len(m_jgs)) | |
| 139 | - | |
| 140 | - if code == cf["observer.mkfifo.start.code"]: | |
| 141 | - if 0 < len(m_jgs): | |
| 142 | - for m_jg in m_jgs: | |
| 143 | - try: | |
| 144 | - w = SimpleWorker(cf, db, m_jg.id) | |
| 145 | - w.run() | |
| 146 | - except Exception, e: | |
| 147 | - logger.info('Failed to perform the job group. Exceptions are not expected. - jobgroup_id=%d : %s' | |
| 148 | - % (m_jg.id, str(e.args))) | |
| 149 | - print >>sys.stderr, traceback.format_exc() | |
| 150 | - t_logger = logging.getLogger('pysilhouette_traceback') | |
| 151 | - t_logger.error(traceback.format_exc()) | |
| 152 | - | |
| 64 | + #self.logger.info('Received code from the FIFO file. - code=%s' % code) | |
| 65 | + session = self.db.get_session() | |
| 66 | + m_jgs = jobgroup_findbytype_status(session, JOBGROUP_TYPE['SERIAL']) | |
| 67 | + session.close() | |
| 68 | + #self.logger.info('Queued the Job Group from the database. - Number of JobGroup=%d' % len(m_jgs)) | |
| 69 | + self.logger.info('Activity Information. - [fifo_code=%s, type=serial, jobgroup_num=%d]' % (code, len(m_jgs))) | |
| 70 | + if code == self.cf["performer.mkfifo.start.code"]: | |
| 71 | + if 0 < len(m_jgs): | |
| 72 | + for m_jg in m_jgs: | |
| 153 | 73 | try: |
| 154 | - session = db.get_session() | |
| 155 | - jobgroup_update(session, m_jg, JOBGROUP_STATUS['APPERR']) | |
| 156 | - session.close() | |
| 157 | - except: | |
| 158 | - logger.error('Failed to change the status of the job group. - jobgroup_id=%d : %s' | |
| 74 | + w = SimpleWorker(self.cf, self.db, m_jg.id) | |
| 75 | + w.process() | |
| 76 | + except Exception, e: | |
| 77 | + self.logger.info('Failed to perform the job group. Exceptions are not expected. - jobgroup_id=%d : %s' | |
| 159 | 78 | % (m_jg.id, str(e.args))) |
| 79 | + print >>sys.stderr, traceback.format_exc() | |
| 160 | 80 | t_logger = logging.getLogger('pysilhouette_traceback') |
| 161 | 81 | t_logger.error(traceback.format_exc()) |
| 162 | - | |
| 82 | + | |
| 83 | + try: | |
| 84 | + session = self.db.get_session() | |
| 85 | + jobgroup_update(session, m_jg, JOBGROUP_STATUS['APPERR']) | |
| 86 | + session.close() | |
| 87 | + except: | |
| 88 | + logger.error('Failed to change the status of the job group. - jobgroup_id=%d : %s' | |
| 89 | + % (m_jg.id, str(e.args))) | |
| 90 | + t_logger = logging.getLogger('pysilhouette_traceback') | |
| 91 | + t_logger.error(traceback.format_exc()) | |
| 92 | + | |
| 93 | + else: | |
| 94 | + #self.logger.info('No Job Group.') | |
| 95 | + pass | |
| 96 | + elif code == self.cf["performer.mkfifo.stop.code"]: | |
| 97 | + self.logger.warning('Received stop code from the FIFO file. - code=%s' % code) | |
| 98 | + return PROCSUCCESS | |
| 163 | 99 | else: |
| 164 | - logger.info('No Job Group.') | |
| 165 | - elif code == cf["observer.mkfifo.stop.code"]: | |
| 166 | - logger.info('Received stop code from the FIFO file. - code=%s' % code) | |
| 167 | - break | |
| 168 | - else: | |
| 169 | - logger.info('Received illegal code from the FIFO file. - code=%s' % code) | |
| 100 | + self.logger.warning('Received illegal code from the FIFO file. - code=%s' % code) | |
| 101 | + | |
| 102 | +# -- | |
| 103 | +def sigterm_handler(signum, frame): | |
| 104 | + logger = logging.getLogger('pysilhouette.performer.signal') | |
| 105 | + logger.info('Stop the performerd with signal - pid=%s, signal=%s' % (os.getpid(), signum)) | |
| 170 | 106 | |
| 171 | 107 | def main(): |
| 172 | 108 | (opts, args) = getopts() |
| 173 | 109 | if chkopts(opts) is True: |
| 174 | - return 1 | |
| 110 | + return PROCERROR | |
| 175 | 111 | |
| 176 | 112 | cf = readconf(opts.config) |
| 177 | 113 | if cf is None: |
| 178 | 114 | print >>sys.stderr, 'Failed to load the config file "%s". (%s)' % (opts.config, sys.argv[0]) |
| 179 | - return 1 | |
| 115 | + return PROCERROR | |
| 116 | + | |
| 117 | + # conf parse | |
| 118 | + if parse_conf(cf) is False: | |
| 119 | + return PROCERROR | |
| 180 | 120 | |
| 181 | 121 | # set env=PYSILHOUETTE_CONF |
| 182 | 122 | os.environ['PYSILHOUETTE_CONF'] = opts.config |
| @@ -185,12 +125,13 @@ def main(): | ||
| 185 | 125 | logger = logging.getLogger('pysilhouette.performer') |
| 186 | 126 | else: |
| 187 | 127 | print >>sys.stderr, 'Failed to load the log file. (%s)' % sys.argv[0] |
| 188 | - return 1 | |
| 128 | + return PROCERROR | |
| 189 | 129 | |
| 190 | 130 | try: |
| 191 | 131 | try: |
| 192 | 132 | signal.signal(signal.SIGTERM, sigterm_handler) |
| 193 | - ret = performer(opts, cf) # start!! | |
| 133 | + performer = Performer(opts, cf) | |
| 134 | + ret = performer.process() # start!! | |
| 194 | 135 | return ret |
| 195 | 136 | except KeyboardInterrupt, k: |
| 196 | 137 | logger.critical('Keyboard interrupt occurred. - %s' % str(k.args)) |
| @@ -200,6 +141,7 @@ def main(): | ||
| 200 | 141 | print >>sys.stderr, 'System error has occurred. - %s' % str(e.args) |
| 201 | 142 | print >>sys.stderr, traceback.format_exc() |
| 202 | 143 | t_logger = logging.getLogger('pysilhouette_traceback') |
| 144 | + t_logger.critical(e) | |
| 203 | 145 | t_logger.critical(traceback.format_exc()) |
| 204 | 146 | |
| 205 | 147 | finally: |
| @@ -207,5 +149,7 @@ def main(): | ||
| 207 | 149 | os.unlink(opts.pidfile) |
| 208 | 150 | logger.info('Process file has been deleted.. - pidfile=%s' % opts.pidfile) |
| 209 | 151 | |
| 152 | + return PROCERROR | |
| 153 | + | |
| 210 | 154 | if __name__ == '__main__': |
| 211 | 155 | sys.exit(main()) |
| @@ -3,7 +3,7 @@ | ||
| 3 | 3 | # |
| 4 | 4 | # This file is part of Pysilhouette. |
| 5 | 5 | # |
| 6 | -# Copyright (c) 2009 HDE, Inc. | |
| 6 | +# Copyright (c) 2009-2010 HDE, Inc. | |
| 7 | 7 | # |
| 8 | 8 | # Permission is hereby granted, free of charge, to any person obtaining a copy |
| 9 | 9 | # of this software and associated documentation files (the "Software"), to deal |
| @@ -75,69 +75,91 @@ def chkopts(opts): | ||
| 75 | 75 | |
| 76 | 76 | return False |
| 77 | 77 | |
| 78 | -def chk_conf(cf): | |
| 79 | - def is_key(key): | |
| 80 | - if cf.has_key(key) is True and 0 < len(cf[key]): | |
| 81 | - return True | |
| 82 | - else: | |
| 83 | - return False | |
| 78 | +def parse_conf(cf): | |
| 79 | + from pysilhouette.util import is_int, is_key, set_cf_int | |
| 80 | + from pysilhouette.uniqkey import is_uuid | |
| 84 | 81 | |
| 85 | 82 | # env |
| 86 | 83 | err_key = "" |
| 87 | - if len(err_key) <= 0 and is_key("env.python") is False: | |
| 84 | + if len(err_key) <= 0 and is_key(cf, "env.python") is False: | |
| 88 | 85 | err_key = "env.python" |
| 89 | - if len(err_key) <= 0 and is_key("env.sys.log.conf.path") is False: | |
| 86 | + if len(err_key) <= 0 and is_key(cf, "env.sys.log.conf.path") is False: | |
| 90 | 87 | err_key = "env.sys.log.conf.path" |
| 91 | - if len(err_key) <= 0 and is_key("env.uniqkey") is False: | |
| 88 | + if len(err_key) <= 0 and is_key(cf, "env.uniqkey") is False: | |
| 92 | 89 | err_key = "env.uniqkey" |
| 93 | - if len(err_key) <= 0 and is_key("daemon.stdin") is False: | |
| 90 | + if len(err_key) <= 0 and is_key(cf, "daemon.stdin") is False: | |
| 94 | 91 | err_key = "daemon.stdin" |
| 95 | - if len(err_key) <= 0 and is_key("daemon.stdout") is False: | |
| 92 | + if len(err_key) <= 0 and is_key(cf, "daemon.stdout") is False: | |
| 96 | 93 | err_key = "daemon.stdout" |
| 97 | - if len(err_key) <= 0 and is_key("daemon.stderr") is False: | |
| 94 | + if len(err_key) <= 0 and is_key(cf, "daemon.stderr") is False: | |
| 98 | 95 | err_key = "daemon.stderr" |
| 99 | - if len(err_key) <= 0 and is_key("observer.target.python") is False: | |
| 96 | + if len(err_key) <= 0 and is_key(cf, "observer.target.python") is False: | |
| 100 | 97 | err_key = "observer.target.python" |
| 101 | - if len(err_key) <= 0 and is_key("observer.target.scheduler") is False: | |
| 98 | + if len(err_key) <= 0 and is_key(cf, "observer.target.scheduler") is False: | |
| 102 | 99 | err_key = "observer.target.scheduler" |
| 103 | - if len(err_key) <= 0 and is_key("observer.target.performer") is False: | |
| 100 | + if len(err_key) <= 0 and is_key(cf, "observer.target.performer") is False: | |
| 104 | 101 | err_key = "observer.target.performer" |
| 105 | - if len(err_key) <= 0 and is_key("observer.restart.count") is False: | |
| 102 | + if len(err_key) <= 0 and is_key(cf, "observer.restart.count") is False: | |
| 106 | 103 | err_key = "observer.restart.count" |
| 107 | - if len(err_key) <= 0 and is_key("observer.restart.count.clear.time") is False: | |
| 104 | + if len(err_key) <= 0 and is_key(cf, "observer.restart.count.clear.time") is False: | |
| 108 | 105 | err_key = "observer.restart.count.clear.time" |
| 109 | - if len(err_key) <= 0 and is_key("observer.check.interval") is False: | |
| 106 | + if len(err_key) <= 0 and is_key(cf, "observer.check.interval") is False: | |
| 110 | 107 | err_key = "observer.check.interval" |
| 111 | - if len(err_key) <= 0 and is_key("observer.status.path") is False: | |
| 108 | + if len(err_key) <= 0 and is_key(cf, "observer.status.path") is False: | |
| 112 | 109 | err_key = "observer.status.path" |
| 113 | - if len(err_key) <= 0 and is_key("observer.mkfifo.path") is False: | |
| 114 | - err_key = "observer.mkfifo.path" | |
| 115 | - if len(err_key) <= 0 and is_key("observer.mkfifo.start.code") is False: | |
| 116 | - err_key = "observer.mkfifo.start.code" | |
| 117 | - if len(err_key) <= 0 and is_key("observer.mkfifo.ignore.code") is False: | |
| 118 | - err_key = "observer.mkfifo.ignore.code" | |
| 119 | - if len(err_key) <= 0 and is_key("observer.mkfifo.stop.code") is False: | |
| 120 | - err_key = "observer.mkfifo.stop.code" | |
| 121 | - if len(err_key) <= 0 and is_key("observer.mkfifo.user.name") is False: | |
| 122 | - err_key = "observer.mkfifo.user.name" | |
| 123 | - if len(err_key) <= 0 and is_key("observer.mkfifo.group.name") is False: | |
| 124 | - err_key = "observer.mkfifo.group.name" | |
| 125 | - if len(err_key) <= 0 and is_key("observer.mkfifo.perms") is False: | |
| 126 | - err_key = "observer.mkfifo.perms" | |
| 127 | - if len(err_key) <= 0 and is_key("scheduler.interval") is False: | |
| 110 | + | |
| 111 | + # performer | |
| 112 | + if len(err_key) <= 0 and is_key(cf, "performer.mkfifo.start.code") is False: | |
| 113 | + err_key = "performer.mkfifo.start.code" | |
| 114 | + if len(err_key) <= 0 and is_key(cf, "performer.mkfifo.ignore.code") is False: | |
| 115 | + err_key = "performer.mkfifo.ignore.code" | |
| 116 | + if len(err_key) <= 0 and is_key(cf, "performer.mkfifo.stop.code") is False: | |
| 117 | + err_key = "performer.mkfifo.stop.code" | |
| 118 | + if len(err_key) <= 0 and is_key(cf, "performer.mkfifo.user.name") is False: | |
| 119 | + err_key = "performer.mkfifo.user.name" | |
| 120 | + if len(err_key) <= 0 and is_key(cf, "performer.mkfifo.group.name") is False: | |
| 121 | + err_key = "performer.mkfifo.group.name" | |
| 122 | + if len(err_key) <= 0 and is_key(cf, "performer.mkfifo.perms") is False: | |
| 123 | + err_key = "performer.mkfifo.perms" | |
| 124 | + if len(err_key) <= 0 and is_key(cf, "performer.mkfifo.path") is False: | |
| 125 | + err_key = "performer.mkfifo.path" | |
| 126 | + | |
| 127 | + # asynperformer | |
| 128 | + if len(err_key) <= 0 and is_key(cf, "asynperformer.mkfifo.start.code") is False: | |
| 129 | + err_key = "asynperformer.mkfifo.start.code" | |
| 130 | + if len(err_key) <= 0 and is_key(cf, "asynperformer.mkfifo.ignore.code") is False: | |
| 131 | + err_key = "asynperformer.mkfifo.ignore.code" | |
| 132 | + if len(err_key) <= 0 and is_key(cf, "asynperformer.mkfifo.stop.code") is False: | |
| 133 | + err_key = "asynperformer.mkfifo.stop.code" | |
| 134 | + if len(err_key) <= 0 and is_key(cf, "asynperformer.mkfifo.user.name") is False: | |
| 135 | + err_key = "asynperformer.mkfifo.user.name" | |
| 136 | + if len(err_key) <= 0 and is_key(cf, "asynperformer.mkfifo.group.name") is False: | |
| 137 | + err_key = "asynperformer.mkfifo.group.name" | |
| 138 | + if len(err_key) <= 0 and is_key(cf, "asynperformer.mkfifo.perms") is False: | |
| 139 | + err_key = "asynperformer.mkfifo.perms" | |
| 140 | + if len(err_key) <= 0 and is_key(cf, "asynperformer.mkfifo.path") is False: | |
| 141 | + err_key = "asynperformer.mkfifo.path" | |
| 142 | + | |
| 143 | + # asynscheduler | |
| 144 | + if len(err_key) <= 0 and is_key(cf, "asynscheduler.interval") is False: | |
| 145 | + err_key = "asynscheduler.interval" | |
| 146 | + | |
| 147 | + if len(err_key) <= 0 and is_key(cf, "scheduler.interval") is False: | |
| 128 | 148 | err_key = "scheduler.interval" |
| 129 | - if len(err_key) <= 0 and is_key("job.popen.env.lang") is False: | |
| 149 | + if len(err_key) <= 0 and is_key(cf, "job.popen.env.lang") is False: | |
| 130 | 150 | err_key = "job.popen.env.lang" |
| 131 | - if len(err_key) <= 0 and is_key("job.popen.timeout") is False: | |
| 151 | + if len(err_key) <= 0 and is_key(cf, "job.popen.timeout") is False: | |
| 132 | 152 | err_key = "job.popen.timeout" |
| 133 | - if len(err_key) <= 0 and is_key("job.popen.waittime") is False: | |
| 153 | + if len(err_key) <= 0 and is_key(cf, "job.popen.waittime") is False: | |
| 134 | 154 | err_key = "job.popen.waittime" |
| 135 | - if len(err_key) <= 0 and is_key("database.url") is False: | |
| 155 | + if len(err_key) <= 0 and is_key(cf, "job.popen.output.limit") is False: | |
| 156 | + err_key = "job.popen.output.limit" | |
| 157 | + | |
| 158 | + if len(err_key) <= 0 and is_key(cf, "database.url") is False: | |
| 136 | 159 | err_key = "database.url" |
| 137 | - if len(err_key) <= 0 and is_key("database.pool.status") is False: | |
| 160 | + if len(err_key) <= 0 and is_key(cf, "database.pool.status") is False: | |
| 138 | 161 | err_key = "database.pool.status" |
| 139 | 162 | |
| 140 | - from pysilhouette.uniqkey import is_uuid | |
| 141 | 163 | if is_uuid(cf["env.uniqkey"]) is False: |
| 142 | 164 | print >>sys.stderr, 'UUID format is not set. - env.uniqkey' |
| 143 | 165 | return False |
| @@ -162,69 +184,136 @@ def chk_conf(cf): | ||
| 162 | 184 | print >>sys.stderr, 'Incorrect file permissions. - observer.target.performer=%s' % (cf["observer.target.performer"]) |
| 163 | 185 | return False |
| 164 | 186 | |
| 165 | - from pysilhouette.util import is_int | |
| 166 | 187 | if is_int(cf["observer.restart.count"]) is False: |
| 167 | 188 | print >>sys.stderr, 'Must be a number. - observer.restart.count=%s' % (cf["observer.restart.count"]) |
| 168 | 189 | return False |
| 190 | + else: | |
| 191 | + set_cf_int(cf, "observer.restart.count") | |
| 169 | 192 | |
| 170 | 193 | if is_int(cf["observer.restart.count.clear.time"]) is False: |
| 171 | 194 | print >>sys.stderr, 'Must be a number. - observer.restart.count.clear.time=%s' % (cf["observer.restart.count.clear.time"]) |
| 172 | 195 | return False |
| 196 | + else: | |
| 197 | + set_cf_int(cf, "observer.restart.count.clear.time") | |
| 173 | 198 | |
| 174 | 199 | if is_int(cf["observer.check.interval"]) is False: |
| 175 | 200 | print >>sys.stderr, 'Must be a number. - observer.check.interval=%s' % (cf["observer.check.interval"]) |
| 176 | 201 | return False |
| 202 | + else: | |
| 203 | + set_cf_int(cf, "observer.check.interval") | |
| 204 | + | |
| 205 | + # performer | |
| 206 | + if is_int(cf["performer.mkfifo.start.code"]) is False: | |
| 207 | + print >>sys.stderr, 'Must be a number. - performer.mkfifo.start.code=%s' % (cf["performer.mkfifo.start.code"]) | |
| 208 | + return False | |
| 209 | + | |
| 210 | + if is_int(cf["performer.mkfifo.ignore.code"]) is False: | |
| 211 | + print >>sys.stderr, 'Must be a number. - performer.mkfifo.ignore.code=%s' % (cf["performer.mkfifo.ignore.code"]) | |
| 212 | + return False | |
| 177 | 213 | |
| 178 | - if is_int(cf["observer.mkfifo.start.code"]) is False: | |
| 179 | - print >>sys.stderr, 'Must be a number. - observer.mkfifo.start.code=%s' % (cf["observer.mkfifo.start.code"]) | |
| 214 | + if is_int(cf["performer.mkfifo.stop.code"]) is False: | |
| 215 | + print >>sys.stderr, 'Must be a number. - performer.mkfifo.stop.code=%s' % (cf["performer.mkfifo.stop.code"]) | |
| 180 | 216 | return False |
| 181 | 217 | |
| 182 | - if is_int(cf["observer.mkfifo.ignore.code"]) is False: | |
| 183 | - print >>sys.stderr, 'Must be a number. - observer.mkfifo.ignore.code=%s' % (cf["observer.mkfifo.ignore.code"]) | |
| 218 | + # asynperformer | |
| 219 | + if is_int(cf["asynperformer.mkfifo.start.code"]) is False: | |
| 220 | + print >>sys.stderr, 'Must be a number. - asynperformer.mkfifo.start.code=%s' % (cf["asynperformer.mkfifo.start.code"]) | |
| 184 | 221 | return False |
| 185 | 222 | |
| 186 | - if is_int(cf["observer.mkfifo.stop.code"]) is False: | |
| 187 | - print >>sys.stderr, 'Must be a number. - observer.mkfifo.stop.code=%s' % (cf["observer.mkfifo.stop.code"]) | |
| 223 | + if is_int(cf["asynperformer.mkfifo.ignore.code"]) is False: | |
| 224 | + print >>sys.stderr, 'Must be a number. - asynperformer.mkfifo.ignore.code=%s' % (cf["asynperformer.mkfifo.ignore.code"]) | |
| 188 | 225 | return False |
| 189 | 226 | |
| 227 | + if is_int(cf["asynperformer.mkfifo.stop.code"]) is False: | |
| 228 | + print >>sys.stderr, 'Must be a number. - asynperformer.mkfifo.stop.code=%s' % (cf["asynperformer.mkfifo.stop.code"]) | |
| 229 | + return False | |
| 230 | + | |
| 231 | + if is_int(cf["asynscheduler.interval"]) is False: | |
| 232 | + print >>sys.stderr, 'Must be a number. - asynscheduler.interval=%s' % (cf["asynscheduler.interval"]) | |
| 233 | + return False | |
| 234 | + else: | |
| 235 | + set_cf_int(cf, "asynscheduler.interval") | |
| 236 | + | |
| 190 | 237 | if is_int(cf["scheduler.interval"]) is False: |
| 191 | 238 | print >>sys.stderr, 'Must be a number. - scheduler.interval=%s' % (cf["scheduler.interval"]) |
| 192 | 239 | return False |
| 240 | + else: | |
| 241 | + set_cf_int(cf, "scheduler.interval") | |
| 193 | 242 | |
| 194 | 243 | if is_int(cf["job.popen.timeout"]) is False: |
| 195 | 244 | print >>sys.stderr, 'Must be a number. - job.popen.timeout=%s' % (cf["job.popen.timeout"]) |
| 196 | 245 | return False |
| 246 | + else: | |
| 247 | + set_cf_int(cf, "job.popen.timeout") | |
| 197 | 248 | |
| 198 | 249 | if is_int(cf["job.popen.waittime"]) is False: |
| 199 | 250 | print >>sys.stderr, 'Must be a number. - job.popen.waittime=%s' % (cf["job.popen.waittime"]) |
| 200 | 251 | return False |
| 252 | + else: | |
| 253 | + set_cf_int(cf, "job.popen.waittime") | |
| 254 | + | |
| 255 | + if is_int(cf["job.popen.output.limit"]) is False: | |
| 256 | + print >>sys.stderr, 'Must be a number. - job.popen.output.limit=%s' % (cf["job.popen.output.limit"]) | |
| 257 | + return False | |
| 258 | + else: | |
| 259 | + set_cf_int(cf, "job.popen.output.limit") | |
| 260 | + | |
| 261 | + # performer | |
| 262 | + p_mkfifo = set([cf["performer.mkfifo.start.code"], | |
| 263 | + cf["performer.mkfifo.ignore.code"], | |
| 264 | + cf["performer.mkfifo.stop.code"]], | |
| 265 | + ) | |
| 266 | + if len(p_mkfifo) != 3: | |
| 267 | + print >>sys.stderr, 'Is not unique. - performer.mkfifo.[start,ignore,stop]=%s,%s,%s' \ | |
| 268 | + % (cf["performer.mkfifo.start.code"], | |
| 269 | + cf["performer.mkfifo.ignore.code"], | |
| 270 | + cf["performer.mkfifo.stop.code"], | |
| 271 | + ) | |
| 272 | + return False | |
| 273 | + | |
| 274 | + try: | |
| 275 | + pwd.getpwnam(cf["performer.mkfifo.user.name"]) | |
| 276 | + except: | |
| 277 | + print >>sys.stderr, 'Can not get information of the user (nonexistent?). - performer.mkfifo.user.name=%s' % (cf["performer.mkfifo.user.name"]) | |
| 278 | + | |
| 279 | + try: | |
| 280 | + grp.getgrnam(cf["performer.mkfifo.group.name"]) | |
| 281 | + except: | |
| 282 | + print >>sys.stderr, 'Can not get information of the group (nonexistent?). - performer.mkfifo.group.name=%s' % (cf["performer.mkfifo.group.name"]) | |
| 283 | + | |
| 284 | + try: | |
| 285 | + int(cf["performer.mkfifo.perms"], 8) | |
| 286 | + except: | |
| 287 | + print >>sys.stderr, 'Incorrect file permissions. - performer.mkfifo.perms=%s' % (cf["performer.mkfifo.perms"]) | |
| 288 | + return False | |
| 201 | 289 | |
| 202 | - s_mkfifo = set([cf["observer.mkfifo.start.code"], | |
| 203 | - cf["observer.mkfifo.ignore.code"], | |
| 204 | - cf["observer.mkfifo.stop.code"]], | |
| 290 | + # asynperformer | |
| 291 | + a_mkfifo = set([cf["asynperformer.mkfifo.start.code"], | |
| 292 | + cf["asynperformer.mkfifo.ignore.code"], | |
| 293 | + cf["asynperformer.mkfifo.stop.code"]], | |
| 205 | 294 | ) |
| 206 | - if len(s_mkfifo) != 3: | |
| 207 | - print >>sys.stderr, 'Is not unique. - observer.mkfifo.[start,ignore,stop]=%s,%s,%s' \ | |
| 208 | - % (cf["observer.mkfifo.start.code"], | |
| 209 | - cf["observer.mkfifo.ignore.code"], | |
| 210 | - cf["observer.mkfifo.stop.code"], | |
| 295 | + if len(a_mkfifo) != 3: | |
| 296 | + print >>sys.stderr, 'Is not unique. - asynperformer.mkfifo.[start,ignore,stop]=%s,%s,%s' \ | |
| 297 | + % (cf["asynperformer.mkfifo.start.code"], | |
| 298 | + cf["asynperformer.mkfifo.ignore.code"], | |
| 299 | + cf["asynperformer.mkfifo.stop.code"], | |
| 211 | 300 | ) |
| 212 | 301 | return False |
| 213 | 302 | |
| 214 | 303 | try: |
| 215 | - pwd.getpwnam(cf["observer.mkfifo.user.name"]) | |
| 304 | + pwd.getpwnam(cf["asynperformer.mkfifo.user.name"]) | |
| 216 | 305 | except: |
| 217 | - print >>sys.stderr, 'Can not get information of the user (nonexistent?). - observer.mkfifo.user.name=%s' % (cf["observer.mkfifo.user.name"]) | |
| 306 | + print >>sys.stderr, 'Can not get information of the user (nonexistent?). - asynperformer.mkfifo.user.name=%s' % (cf["asynperformer.mkfifo.user.name"]) | |
| 218 | 307 | |
| 219 | 308 | try: |
| 220 | - grp.getgrnam(cf["observer.mkfifo.group.name"]) | |
| 309 | + grp.getgrnam(cf["asynperformer.mkfifo.group.name"]) | |
| 221 | 310 | except: |
| 222 | - print >>sys.stderr, 'Can not get information of the group (nonexistent?). - observer.mkfifo.group.name=%s' % (cf["observer.mkfifo.group.name"]) | |
| 311 | + print >>sys.stderr, 'Can not get information of the group (nonexistent?). - asynperformer.mkfifo.group.name=%s' % (cf["asynperformer.mkfifo.group.name"]) | |
| 223 | 312 | |
| 224 | 313 | try: |
| 225 | - int(cf["observer.mkfifo.perms"], 8) | |
| 314 | + int(cf["asynperformer.mkfifo.perms"], 8) | |
| 226 | 315 | except: |
| 227 | - print >>sys.stderr, 'Incorrect file permissions. - observer.mkfifo.perms=%s' % (cf["observer.mkfifo.perms"]) | |
| 316 | + print >>sys.stderr, 'Incorrect file permissions. - asynperformer.mkfifo.perms=%s' % (cf["asynperformer.mkfifo.perms"]) | |
| 228 | 317 | return False |
| 229 | 318 | |
| 230 | 319 | if cf.has_key("job.whitelist.flag") is True \ |
| @@ -235,8 +324,6 @@ def chk_conf(cf): | ||
| 235 | 324 | print >>sys.stderr, 'File not found. - job.whitelist.path=%s' % (cf["job.whitelist.path"]) |
| 236 | 325 | return False |
| 237 | 326 | |
| 238 | - | |
| 239 | - | |
| 240 | 327 | # database.pool.status |
| 241 | 328 | if (cf["database.pool.status"] in ("0","1")) is False: |
| 242 | 329 | print >>sys.stderr, 'The mistake is found in the set value. Please set 0 or 1. - database.pool.status' |
| @@ -257,20 +344,41 @@ def chk_conf(cf): | ||
| 257 | 344 | if is_int(cf["database.pool.max.overflow"]) is False: |
| 258 | 345 | print >>sys.stderr, 'Please set it by the numerical value. - database.pool.max.overflow' |
| 259 | 346 | return False |
| 347 | + else: | |
| 348 | + set_cf_int(cf, "database.pool.max.overflow") | |
| 260 | 349 | |
| 261 | 350 | if is_int(cf["database.pool.size"]) is False: |
| 262 | 351 | print >>sys.stderr, 'Please set it by the numerical value. - database.pool.size' |
| 263 | 352 | return False |
| 353 | + else: | |
| 354 | + set_cf_int(cf, "database.pool.size") | |
| 264 | 355 | |
| 265 | 356 | if int(cf["database.pool.size"]) <= 0: |
| 266 | 357 | print >>sys.stderr, 'Please set values that are larger than 0. - database.pool.size' |
| 267 | 358 | return False |
| 359 | + else: | |
| 360 | + set_cf_int(cf, "database.pool.size") | |
| 268 | 361 | |
| 269 | 362 | # Comparison |
| 270 | 363 | if int(cf["database.pool.max.overflow"]) < int(cf["database.pool.size"]): |
| 271 | 364 | print >>sys.stderr, 'Please set "database.pool.max.overflow" to a value that is larger than "database.pool.size".' |
| 272 | 365 | return False |
| 273 | 366 | |
| 367 | + # asynperformer.thread.pool.size | |
| 368 | + if cf.has_key("asynperformer.thread.pool.size") is False: | |
| 369 | + print >>sys.stderr, 'Configuration information is missing. - asynperformer.thread.pool.size' | |
| 370 | + return False | |
| 371 | + | |
| 372 | + if is_int(cf["asynperformer.thread.pool.size"]) is False: | |
| 373 | + print >>sys.stderr, 'Please set it by the numerical value. - asynperformer.thread.pool.size' | |
| 374 | + return False | |
| 375 | + else: | |
| 376 | + set_cf_int(cf, "asynperformer.thread.pool.size") | |
| 377 | + | |
| 378 | + if int(cf["asynperformer.thread.pool.size"]) <= 0: | |
| 379 | + print >>sys.stderr, 'Please set values that are larger than 0. - asynperformer.thread.pool.size' | |
| 380 | + return False | |
| 381 | + | |
| 274 | 382 | return True |
| 275 | 383 | |
| 276 | 384 | def readconf(path): |
| @@ -3,7 +3,7 @@ | ||
| 3 | 3 | # |
| 4 | 4 | # This file is part of Pysilhouette. |
| 5 | 5 | # |
| 6 | -# Copyright (c) 2009 HDE, Inc. | |
| 6 | +# Copyright (c) 2009-2010 HDE, Inc. | |
| 7 | 7 | # |
| 8 | 8 | # Permission is hereby granted, free of charge, to any person obtaining a copy |
| 9 | 9 | # of this software and associated documentation files (the "Software"), to deal |
| @@ -35,88 +35,78 @@ import signal | ||
| 35 | 35 | import traceback |
| 36 | 36 | import logging |
| 37 | 37 | |
| 38 | +from pysilhouette import PROCERROR, PROCSUCCESS | |
| 39 | +from pysilhouette.er import ER | |
| 40 | +from pysilhouette.db import create_database, Database | |
| 38 | 41 | from pysilhouette.log import reload_conf |
| 39 | -from pysilhouette.prep import readconf, getopts, chkopts | |
| 40 | -from pysilhouette.util import write_pidfile, create_fifo | |
| 42 | +from pysilhouette.prep import readconf, getopts, chkopts, parse_conf | |
| 41 | 43 | |
| 42 | -def sigterm_handler(signum, frame): | |
| 43 | - global logger | |
| 44 | - logger.info('Stop the schedulerd with signal- pid=%s, signal=%s' % (os.getpid(), signum)) | |
| 44 | +ENTITYS = ('performer',) | |
| 45 | 45 | |
| 46 | -def scheduler(): | |
| 47 | - global opts | |
| 48 | - global cf | |
| 49 | - global logger | |
| 46 | +class Scheduler(ER): | |
| 47 | + """Scheduler Class | |
| 48 | + """ | |
| 49 | + def __init__(self, opts, cf): | |
| 50 | + ER.__init__(self, opts, cf) | |
| 51 | + for entity in ENTITYS: | |
| 52 | + self._fifo(entity) | |
| 50 | 53 | |
| 51 | - if os.access(cf["observer.mkfifo.path"], os.F_OK|os.R_OK|os.W_OK) is False: | |
| 52 | - try: | |
| 53 | - os.unlink(cf["observer.mkfifo.path"]) | |
| 54 | - logger.info('Deleted filo file. - file=%s' % cf["observer.mkfifo.path"]) | |
| 55 | - except: | |
| 56 | - pass # Not anything | |
| 57 | - create_fifo(cf["observer.mkfifo.path"], | |
| 58 | - cf["observer.mkfifo.user.name"], | |
| 59 | - cf["observer.mkfifo.group.name"], | |
| 60 | - cf["observer.mkfifo.perms"], | |
| 61 | - ) | |
| 62 | - | |
| 63 | - logger.info('The fifo file was created. - file=%s' % cf["observer.mkfifo.path"]) | |
| 64 | - | |
| 65 | - if opts.daemon is True: | |
| 66 | - pid = os.getpid() | |
| 67 | - if write_pidfile(opts.pidfile, pid): | |
| 68 | - logger.info('The process file was created. - file=%s' % opts.pidfile) | |
| 69 | - else: | |
| 70 | - logger.info('Could not create process file. - file=%s' % opts.pidfile) | |
| 71 | - return 1 | |
| 72 | - | |
| 73 | - logger.info('schedulerd started!!') | |
| 74 | - | |
| 75 | - while True: | |
| 76 | - try: | |
| 77 | - fp = open(cf["observer.mkfifo.path"], 'w') | |
| 54 | + self._setdaemon() | |
| 55 | + create_database(cf) | |
| 56 | + | |
| 57 | + def process(self): | |
| 58 | + self.logger.info('scheduler : [started]') | |
| 59 | + while True: | |
| 78 | 60 | try: |
| 79 | - fp.write(cf['observer.mkfifo.start.code']) | |
| 80 | - logger.info('Start code was written. - file=%s : code=%s' | |
| 81 | - % (cf["observer.mkfifo.path"], cf['observer.mkfifo.start.code'])) | |
| 82 | - finally: | |
| 83 | - fp.close() | |
| 84 | - | |
| 85 | - logger.debug('interval start, interval=%s' % (cf['scheduler.interval'])) | |
| 86 | - time.sleep(int(cf['scheduler.interval'])) | |
| 87 | - except IOError, i: | |
| 88 | - if i.errno == 4: | |
| 89 | - return 0 # When ending with the signal | |
| 90 | - | |
| 91 | - # beyond expectation | |
| 92 | - logger.error('file=%s - 2 error write FIFO, code=%s' | |
| 93 | - % (self.fifo, cf['observer.mkfifo.start.code'])) | |
| 94 | - return 1 | |
| 95 | - | |
| 96 | -def main(): | |
| 97 | - global opts | |
| 98 | - global cf | |
| 99 | - global logger | |
| 61 | + for entity in ENTITYS: | |
| 62 | + fp = open(self.cf["%s.mkfifo.path" % entity], 'w') | |
| 63 | + try: | |
| 64 | + fp.write(str(self.cf['%s.mkfifo.start.code' % entity])) | |
| 65 | + | |
| 66 | + #self.logger.info('Start code was written. - file=%s : code=%s' % (self.cf["%s.mkfifo.path" % entity], self.cf['%s.mkfifo.start.code' % entity])) | |
| 67 | + self.logger.info('Activity Information. - [fifo code=%s]' % (self.cf['%s.mkfifo.start.code' % entity])) | |
| 68 | + finally: | |
| 69 | + fp.close() | |
| 70 | + | |
| 71 | + self.logger.debug('interval start, interval=%s' % (self.cf['scheduler.interval'])) | |
| 72 | + time.sleep(self.cf['scheduler.interval']) | |
| 100 | 73 | |
| 74 | + except IOError, i: | |
| 75 | + if i.errno == 4: | |
| 76 | + return PROCSUCCESS # When ending with the signal | |
| 77 | + | |
| 78 | + return PROCERROR # beyond expectation | |
| 79 | + | |
| 80 | +def sigterm_handler(signum, frame): | |
| 81 | + logger = logging.getLogger('pysilhouette.scheduler.signal') | |
| 82 | + logger.warning('Stop the schedulerd with signal- pid=%s, signal=%s' % (os.getpid(), signum)) | |
| 83 | + | |
| 84 | + | |
| 85 | +def main(): | |
| 101 | 86 | (opts, args) = getopts() |
| 102 | 87 | if chkopts(opts) is True: |
| 103 | - return 1 | |
| 88 | + return PROCERROR | |
| 104 | 89 | |
| 105 | 90 | cf = readconf(opts.config) |
| 106 | 91 | if cf is None: |
| 107 | 92 | print >>sys.stderr, 'Failed to load the config file "%s". (%s)' % (opts.config, sys.argv[0]) |
| 108 | - return 1 | |
| 93 | + return PROCERROR | |
| 109 | 94 | |
| 95 | + # conf parse | |
| 96 | + if parse_conf(cf) is False: | |
| 97 | + return PROCERROR | |
| 98 | + | |
| 110 | 99 | if reload_conf(cf["env.sys.log.conf.path"]): |
| 111 | 100 | logger = logging.getLogger('pysilhouette.scheduler') |
| 112 | 101 | else: |
| 113 | 102 | print >>sys.stderr, 'Failed to load the log file. (%s)' % sys.argv[0] |
| 114 | - return 1 | |
| 103 | + return PROCERROR | |
| 115 | 104 | |
| 116 | 105 | try: |
| 117 | 106 | try: |
| 118 | 107 | signal.signal(signal.SIGTERM, sigterm_handler) |
| 119 | - ret = scheduler() # start!! | |
| 108 | + scheduler = Scheduler(opts, cf) | |
| 109 | + ret = scheduler.process() # start!! | |
| 120 | 110 | return ret |
| 121 | 111 | except KeyboardInterrupt, k: |
| 122 | 112 | logger.critical('Keyboard interrupt occurred. - %s' % str(k.args)) |
| @@ -131,7 +121,9 @@ def main(): | ||
| 131 | 121 | finally: |
| 132 | 122 | if opts.daemon is True and os.path.isfile(opts.pidfile): |
| 133 | 123 | os.unlink(opts.pidfile) |
| 134 | - logger.info('Process file has been deleted.. - pidfile=%s' % opts.pidfile) | |
| 124 | + logger.warning('Process file has been deleted.. - pidfile=%s' % opts.pidfile) | |
| 125 | + | |
| 126 | + return PROCERROR | |
| 135 | 127 | |
| 136 | 128 | if __name__ == '__main__': |
| 137 | 129 | sys.exit(main()) |
| @@ -40,8 +40,9 @@ except ImportError, e: | ||
| 40 | 40 | print >>sys.stderr, '[Error] There are not enough libraries. - %s' % str(e.args) |
| 41 | 41 | #traceback.format_exc() |
| 42 | 42 | sys.exit(1) |
| 43 | - | |
| 44 | -from pysilhouette.prep import readconf, getopts, chkopts, chk_conf | |
| 43 | + | |
| 44 | +from pysilhouette import PROCERROR, PROCSUCCESS | |
| 45 | +from pysilhouette.prep import readconf, getopts, chkopts, parse_conf | |
| 45 | 46 | from pysilhouette.daemon import daemonize, observer |
| 46 | 47 | from pysilhouette.log import reload_conf |
| 47 | 48 |
| @@ -60,33 +61,33 @@ def main(): | ||
| 60 | 61 | |
| 61 | 62 | (opts, args) = getopts() |
| 62 | 63 | if chkopts(opts) is True: |
| 63 | - return 1 | |
| 64 | + return PROCERROR | |
| 64 | 65 | |
| 65 | 66 | #### |
| 66 | 67 | try: |
| 67 | 68 | opts.config = os.path.abspath(opts.config) |
| 68 | 69 | except AttributeError, e: |
| 69 | 70 | print >>sys.stderr, 'No configuration file path.' |
| 70 | - return 1 | |
| 71 | + return PROCERROR | |
| 71 | 72 | |
| 72 | 73 | cf = readconf(opts.config) |
| 73 | 74 | if cf is None: |
| 74 | 75 | print >>sys.stderr, 'Failed to load the config file "%s". (%s)' % (opts.config, sys.argv[0]) |
| 75 | - return 1 | |
| 76 | + return PROCERROR | |
| 76 | 77 | |
| 77 | - # conf check | |
| 78 | - if chk_conf(cf) is False: | |
| 79 | - return 1 | |
| 78 | + # conf parse | |
| 79 | + if parse_conf(cf) is False: | |
| 80 | + return PROCERROR | |
| 80 | 81 | |
| 81 | 82 | if reload_conf(cf["env.sys.log.conf.path"]): |
| 82 | 83 | logger = logging.getLogger('pysilhouette.silhouette') |
| 83 | 84 | else: |
| 84 | 85 | print >>sys.stderr, 'Failed to load the log file. (%s)' % sys.argv[0] |
| 85 | - return 1 | |
| 86 | + return PROCERROR | |
| 86 | 87 | |
| 87 | 88 | if opts.uniqkey: |
| 88 | 89 | print >>sys.stdout, cf["env.uniqkey"] |
| 89 | - return 0 | |
| 90 | + return PROCSUCCESS | |
| 90 | 91 | |
| 91 | 92 | if opts.daemon is True: |
| 92 | 93 | logger.debug('Daemon stdin=%s' % cf['daemon.stdin']) |
| @@ -97,7 +98,7 @@ def main(): | ||
| 97 | 98 | stderr=cf['daemon.stderr'], |
| 98 | 99 | pidfile=opts.pidfile) |
| 99 | 100 | logger.info('Daemon Running!! pid=%s' % pid) |
| 100 | - | |
| 101 | + | |
| 101 | 102 | try: |
| 102 | 103 | signal.signal(signal.SIGTERM, sigterm_handler) |
| 103 | 104 | ret = observer(opts=opts, cf=cf) # start!! |
| @@ -111,6 +112,8 @@ def main(): | ||
| 111 | 112 | t_logger = logging.getLogger('pysilhouette_traceback') |
| 112 | 113 | t_logger.critical(traceback.format_exc()) |
| 113 | 114 | print >>sys.stderr, traceback.format_exc() |
| 114 | - | |
| 115 | + | |
| 116 | + return PROCERROR | |
| 117 | + | |
| 115 | 118 | if __name__ == '__main__': |
| 116 | 119 | sys.exit(main()) |
| @@ -133,12 +133,6 @@ class TestUtil(unittest.TestCase): | ||
| 133 | 133 | self.assertEquals('30', ret) |
| 134 | 134 | self.unlink(self.pname) |
| 135 | 135 | |
| 136 | - def test_read_pidfile_1(self): | |
| 137 | - self.unlink(self.pname) | |
| 138 | - | |
| 139 | - ret = target.read_pidfile(self.pname) | |
| 140 | - self.assertEquals('', ret) | |
| 141 | - | |
| 142 | 136 | class SuiteIsSplitShellCommand(unittest.TestSuite): |
| 143 | 137 | def __init__(self): |
| 144 | 138 | tests = ['test_split_shell_command_0', |
| @@ -110,7 +110,7 @@ class TestWorker(unittest.TestCase): | ||
| 110 | 110 | _m_jgs = jobgroup_findbystatus(sess) |
| 111 | 111 | for _m_jg in _m_jgs: |
| 112 | 112 | _w = Worker(self._db, _m_jg.id) |
| 113 | - _w.run() | |
| 113 | + _w.process() | |
| 114 | 114 | worker_debug(self._db, _m_jg.id) |
| 115 | 115 | sess.close() |
| 116 | 116 |
| @@ -359,7 +359,6 @@ class TestWorker(unittest.TestCase): | ||
| 359 | 359 | - Job : First Job Abnormal termination |
| 360 | 360 | - Rollback: Abnormal termination |
| 361 | 361 | - Send Mail : None |
| 362 | - </comment-en> | |
| 363 | 362 | """ |
| 364 | 363 | sess = self._db.get_session() |
| 365 | 364 | self.set_job(sess, 'Test Case 13', 'b942f21c-4039-e6e9-09dc-9685985a1b84', |
| @@ -59,26 +59,19 @@ def split_shell_command(cmd): | ||
| 59 | 59 | return ret |
| 60 | 60 | |
| 61 | 61 | def write_pidfile(fname, pid): |
| 62 | + fp = open(fname, 'w') | |
| 62 | 63 | try: |
| 63 | - fp = open(fname, 'w') | |
| 64 | - try: | |
| 65 | - fp.write('%d' % pid) | |
| 66 | - return True | |
| 67 | - finally: | |
| 68 | - fp.close() | |
| 69 | - | |
| 70 | - except: | |
| 71 | - return False | |
| 64 | + fp.write('%d' % pid) | |
| 65 | + return True | |
| 66 | + finally: | |
| 67 | + fp.close() | |
| 72 | 68 | |
| 73 | 69 | def read_pidfile(fname): |
| 70 | + fp = open(fname, 'r') | |
| 74 | 71 | try: |
| 75 | - fp = open(fname, 'r') | |
| 76 | - try: | |
| 77 | - return fp.read() | |
| 78 | - finally: | |
| 79 | - fp.close() | |
| 80 | - except: | |
| 81 | - return '' | |
| 72 | + return fp.read() | |
| 73 | + finally: | |
| 74 | + fp.close() | |
| 82 | 75 | |
| 83 | 76 | |
| 84 | 77 | def create_fifo(fname, user, group, perm): |
| @@ -92,14 +85,10 @@ def create_fifo(fname, user, group, perm): | ||
| 92 | 85 | @param: perm: Permission - example) '0666' |
| 93 | 86 | @type: perm: str(4) |
| 94 | 87 | """ |
| 95 | - try: | |
| 96 | - perm8 = int(perm, 8) | |
| 97 | - os.mkfifo(fname, perm8) | |
| 98 | - os.chown(fname, pwd.getpwnam(user)[2], grp.getgrnam(group)[2]) | |
| 99 | - os.chmod(fname, perm8) | |
| 100 | - return True | |
| 101 | - except OSError ,o: | |
| 102 | - return False | |
| 88 | + perm8 = int(perm, 8) | |
| 89 | + os.mkfifo(fname, perm8) | |
| 90 | + os.chown(fname, pwd.getpwnam(user)[2], grp.getgrnam(group)[2]) | |
| 91 | + os.chmod(fname, perm8) | |
| 103 | 92 | |
| 104 | 93 | def kill_proc(proc): |
| 105 | 94 | if proc and hasattr(os, 'kill'): |
| @@ -113,7 +102,11 @@ def kill_proc(proc): | ||
| 113 | 102 | except: |
| 114 | 103 | return False |
| 115 | 104 | |
| 116 | -def popen(cmd, timeout, waittime, lang, limit=4096, job_id=None): | |
| 105 | +def popen(cmd, timeout, waittime, lang, limit=1048576, job_id=None): | |
| 106 | + """<comment-ja> | |
| 107 | + @param limit: 1048576(1MByte) | |
| 108 | + @type limit: int | |
| 109 | + """ | |
| 117 | 110 | |
| 118 | 111 | proc_info = {} |
| 119 | 112 |
| @@ -197,6 +190,15 @@ def is_int(val): | ||
| 197 | 190 | except: |
| 198 | 191 | return False |
| 199 | 192 | |
| 193 | +def is_key(cf, key): | |
| 194 | + if cf.has_key(key) is True and 0 < len(cf[key]): | |
| 195 | + return True | |
| 196 | + else: | |
| 197 | + return False | |
| 198 | + | |
| 199 | +def set_cf_int(cf, key): | |
| 200 | + cf[key] = int(cf[key]) | |
| 201 | + | |
| 200 | 202 | if __name__ == '__main__': |
| 201 | 203 | #print popen(cmd='efdsfdsafdsafdsafdsafdsa', timeout=3, waittime=1, lang='C') |
| 202 | 204 | print popen(cmd='date', timeout=3, waittime=1, lang='C') |
| @@ -3,7 +3,7 @@ | ||
| 3 | 3 | # |
| 4 | 4 | # This file is part of Pysilhouette. |
| 5 | 5 | # |
| 6 | -# Copyright (c) 2009 HDE, Inc. | |
| 6 | +# Copyright (c) 2009-2010 HDE, Inc. | |
| 7 | 7 | # |
| 8 | 8 | # Permission is hereby granted, free of charge, to any person obtaining a copy |
| 9 | 9 | # of this software and associated documentation files (the "Software"), to deal |
| @@ -30,6 +30,7 @@ | ||
| 30 | 30 | |
| 31 | 31 | import subprocess |
| 32 | 32 | import os |
| 33 | +import sys | |
| 33 | 34 | import traceback |
| 34 | 35 | import logging |
| 35 | 36 |
| @@ -50,14 +51,7 @@ class SilhouetteWorkerException(pysilhouette.SilhouetteException): | ||
| 50 | 51 | class Worker: |
| 51 | 52 | """Worker Base class |
| 52 | 53 | """ |
| 53 | - | |
| 54 | - def __init__(self, cf, db, jobgroup_id): | |
| 55 | - self._cf = cf | |
| 56 | - self._db = db | |
| 57 | - self._jobgroup_id = jobgroup_id | |
| 58 | - self.logger = logging.getLogger('pysilhouette.performer.worker') | |
| 59 | - | |
| 60 | - def run(self): | |
| 54 | + def process(self): | |
| 61 | 55 | try: |
| 62 | 56 | session = self._db.get_session() |
| 63 | 57 | self.logger.debug('Session was obtained from the database. - session=%s' % session) |
| @@ -66,7 +60,6 @@ class Worker: | ||
| 66 | 60 | self._cf['env.uniqkey']) |
| 67 | 61 | |
| 68 | 62 | if self._m_jg is None: return False |
| 69 | - | |
| 70 | 63 | jobgroup_update(session, self._m_jg, JOBGROUP_STATUS['RUN']) # JobGroup UPDATE |
| 71 | 64 | _m_jobs = job_findbyjobgroup_id(session, self._jobgroup_id, False) # order asc |
| 72 | 65 |
| @@ -76,12 +69,12 @@ class Worker: | ||
| 76 | 69 | try: |
| 77 | 70 | ret = self._action(session, _m_jobs) |
| 78 | 71 | except Exception, e: |
| 79 | - self.logger.info('Failed to perform the job action. Exceptions are not expected. - jobgroup_id=%d : %s' | |
| 80 | - % (self._jobgroup_id, str(e.args))) | |
| 81 | - | |
| 82 | - jobgroup_update(session, self._m_jg, JOBGROUP_STATUS['APPERR']) | |
| 83 | 72 | t_logger = logging.getLogger('pysilhouette_traceback') |
| 84 | 73 | t_logger.info(traceback.format_exc()) |
| 74 | + self.logger.info('%s, Failed to perform the job action. Exceptions are not expected. - jobgroup_id=%d : %s, JobGroup status=%s' | |
| 75 | + % (self.getName(), self._jobgroup_id, str(e.args), JOBGROUP_STATUS['APPERR'])) | |
| 76 | + | |
| 77 | + jobgroup_update(session, self._m_jg, JOBGROUP_STATUS['APPERR']) | |
| 85 | 78 | err = True |
| 86 | 79 | |
| 87 | 80 | try: |
| @@ -192,6 +185,11 @@ class Worker: | ||
| 192 | 185 | class SimpleWorker(Worker): |
| 193 | 186 | """Sequential Worker Class |
| 194 | 187 | """ |
| 188 | + def __init__(self, cf, db, jobgroup_id): | |
| 189 | + self._cf = cf | |
| 190 | + self._db = db | |
| 191 | + self._jobgroup_id = jobgroup_id | |
| 192 | + self.logger = logging.getLogger('pysilhouette.worker.simpleworker') | |
| 195 | 193 | |
| 196 | 194 | def _action(self, session, m_jobs): |
| 197 | 195 | ret = True |
| @@ -207,23 +205,33 @@ class SimpleWorker(Worker): | ||
| 207 | 205 | lcmd = split_shell_command(cmd) |
| 208 | 206 | if self.chk_whitelist(lcmd[0]): |
| 209 | 207 | try: |
| 210 | - (proc, proc_info) = popen(lcmd, | |
| 211 | - self._cf['job.popen.timeout'], | |
| 212 | - self._cf['job.popen.waittime'], | |
| 213 | - self._cf['job.popen.env.lang'], | |
| 214 | - m_job.STD_OUTPUT_LIMIT, | |
| 215 | - m_job.id, | |
| 208 | + (proc, proc_info) = popen(cmd=lcmd, | |
| 209 | + timeout=self._cf['job.popen.timeout'], | |
| 210 | + waittime=self._cf['job.popen.waittime'], | |
| 211 | + lang=self._cf['job.popen.env.lang'], | |
| 212 | + limit=self._cf['job.popen.output.limit'], | |
| 213 | + job_id=m_job.id, | |
| 216 | 214 | ) |
| 217 | 215 | |
| 218 | 216 | self.logger.debug('Of commands executed stdout=%s' % proc_info['stdout']) |
| 219 | 217 | self.logger.debug('Of commands executed stderr=%s' % proc_info['stderr']) |
| 220 | 218 | |
| 219 | + if self._cf['job.popen.output.limit'] < len(proc_info['stdout']): | |
| 220 | + self.logger.info("There was a limit beyond stdout output. Information-processing is truncated beyond the limit. - limit=%d, stdout=%d" \ | |
| 221 | + % (self._cf['job.popen.output.limit'], len(proc_info['stdout']))) | |
| 222 | + | |
| 223 | + if self._cf['job.popen.output.limit'] < len(proc_info['stderr']): | |
| 224 | + self.logger.info("There was a limit beyond stderr output. Information-processing is truncated beyond the limit. - limit=%d, stderr=%d" \ | |
| 225 | + % (self._cf['job.popen.output.limit'], len(proc_info['stderr']))) | |
| 226 | + | |
| 227 | + | |
| 221 | 228 | except OSError, oe: |
| 222 | 229 | self.logger.info('action command system failed!! job_id=%d : cmd=%s' |
| 223 | 230 | % (m_job.id, cmd)) |
| 224 | 231 | raise oe |
| 225 | - | |
| 232 | + | |
| 226 | 233 | job_result_action(session, m_job, proc_info) # Job result UPDATE |
| 234 | + | |
| 227 | 235 | if proc_info['r_code'] == 0: # Normal end |
| 228 | 236 | self.logger.info('action command was successful!! job_id=%d : cmd=%s' |
| 229 | 237 | % (m_job.id, cmd)) |
| @@ -266,11 +274,11 @@ class SimpleWorker(Worker): | ||
| 266 | 274 | |
| 267 | 275 | if self.chk_whitelist(lcmd[0]): |
| 268 | 276 | try: |
| 269 | - (proc, proc_info) = popen(lcmd, | |
| 270 | - self._cf['job.popen.timeout'], | |
| 271 | - self._cf['job.popen.waittime'], | |
| 272 | - self._cf['job.popen.env.lang'], | |
| 273 | - m_job.STD_OUTPUT_LIMIT, | |
| 277 | + (proc, proc_info) = popen(cmd=lcmd, | |
| 278 | + timeout=self._cf['job.popen.timeout'], | |
| 279 | + waittime=self._cf['job.popen.waittime'], | |
| 280 | + lang=self._cf['job.popen.env.lang'], | |
| 281 | + limit=self._cf['job.popen.output.limit'], | |
| 274 | 282 | ) |
| 275 | 283 | |
| 276 | 284 | self.logger.debug('Of commands executed stdout=%s' % proc_info['stdout']) |
| @@ -304,25 +312,162 @@ class SimpleWorker(Worker): | ||
| 304 | 312 | self.logger.debug('Does not rollback the process. - job_id=%d : status=%s' |
| 305 | 313 | % (m_job.id, m_job.status)) |
| 306 | 314 | |
| 315 | +# -- | |
| 316 | +import threading | |
| 317 | + | |
| 318 | +class ThreadQueue(threading.Thread): | |
| 319 | + def __init__(self, request_queue, response_list, *args, **kwargs): | |
| 320 | + threading.Thread.__init__(self, *args, **kwargs) | |
| 321 | + self.setDaemon(1) | |
| 322 | + self.request_queue = request_queue | |
| 323 | + self.response_list = response_list | |
| 324 | + self.start() | |
| 325 | + self.logger = logging.getLogger('pysilhouette.worker.threadqueue') | |
| 326 | + | |
| 327 | + def now_alive(self): | |
| 328 | + ret = 0 | |
| 329 | + for th in self.response_list: | |
| 330 | + if th[0].isAlive() is True: | |
| 331 | + ret =+ 1 | |
| 332 | + return ret | |
| 333 | + | |
| 334 | + def response_clean(self): | |
| 335 | + _size = 0 | |
| 336 | + _len = len(self.response_list) | |
| 337 | + while _size < _len: | |
| 338 | + if self.response_list[_size][0].isAlive() is False: | |
| 339 | + self.response_list.pop(_size) # remove | |
| 340 | + _len =- 1 | |
| 341 | + else: | |
| 342 | + _size =+ 1 | |
| 343 | + return len(self.response_list) | |
| 344 | + | |
| 345 | + def put(self, callable, *args, **kwargs): | |
| 346 | + self.logger.debug('callable=%s' % str(callable)) | |
| 347 | + self.request_queue.put((callable, args, kwargs)) | |
| 348 | + | |
| 349 | + def run(self): | |
| 350 | + while True: | |
| 351 | + callable, args, kwargs = self.request_queue.get() | |
| 352 | + self.logger.debug('callable=%s, args=%s, kwargs=%s' \ | |
| 353 | + % (str(callable), str(args), str(kwargs))) | |
| 354 | + callable.start() | |
| 355 | + self.response_list.append((callable, args, kwargs)) | |
| 356 | + | |
| 357 | +class ThreadWorker(threading.Thread, SimpleWorker): | |
| 358 | + def __init__(self, cf, db, jobgroup_id): | |
| 359 | + threading.Thread.__init__(self) | |
| 360 | + self._cf = cf | |
| 361 | + self._jobgroup_id = jobgroup_id | |
| 362 | + self._db = db | |
| 363 | + | |
| 364 | + def run(self): | |
| 365 | + self.logger = logging.getLogger('pysilhouette.worker.threadworker') | |
| 366 | + try: | |
| 367 | + self.process() | |
| 368 | + except Exception, e: | |
| 369 | + self.logger.error('%s - JobGroup execute failed. - jobgroup_id=%d : %s, JobGroup status=%s' \ | |
| 370 | + % (self.getName(), self._jobgroup_id, str(e.args), JOBGROUP_STATUS['APPERR'])) | |
| 371 | + t_logger = logging.getLogger('pysilhouette_traceback') | |
| 372 | + t_logger.error(traceback.format_exc()) | |
| 373 | + try: | |
| 374 | + session = self._db.get_session() | |
| 375 | + jobgroup_update(session, | |
| 376 | + jobgroup_findbyid(session, self._jobgroup_id, self._cf['env.uniqkey']), | |
| 377 | + JOBGROUP_STATUS['APPERR']) | |
| 378 | + session.close() | |
| 379 | + except: | |
| 380 | + self.logger.error('JobGroup failed to update. - jobgroup_id=%d : %s, update status=%s' \ | |
| 381 | + % (self._jobgroup_id, str(e.args), JOBGROUP_STATUS['APPERR'])) | |
| 382 | + t_logger.error(traceback.format_exc()) | |
| 383 | + | |
| 384 | +def dummy_set_job(cf, number, action, rollback, finish, type, db=None): | |
| 385 | + try: | |
| 386 | + if db is None: | |
| 387 | + db = Database(cf['database.url'],encoding="utf-8",convert_unicode=True,echo=True,echo_pool=True) | |
| 388 | + reload_mappers(db.get_metadata()) | |
| 389 | + | |
| 390 | + session = db.get_session() | |
| 391 | + except Exception, e: | |
| 392 | + print >>sys.stderr, 'Initializing a database error' | |
| 393 | + raise | |
| 394 | + | |
| 395 | + try: | |
| 396 | + jgs = [] | |
| 397 | + for i in range(number): | |
| 398 | + jg_name = u'%s-%d' % ('worker#dummy_set_job#jobgroup', i) | |
| 399 | + jg_ukey = unicode(cf['env.uniqkey'], "utf-8") | |
| 400 | + jg = JobGroup(jg_name, jg_ukey) | |
| 401 | + if not finish is None: | |
| 402 | + jg.finish_command = unicode(finish, "utf-8") | |
| 403 | + if type == 'serial': | |
| 404 | + jg.type = JOBGROUP_TYPE['SERIAL'] | |
| 405 | + elif type == 'parallel': | |
| 406 | + jg.type = JOBGROUP_TYPE['PARALLEL'] | |
| 407 | + else: | |
| 408 | + jg.type = JOBGROUP_TYPE['SERIAL'] | |
| 409 | + | |
| 410 | + j_name = u'%s-%d' % ('worker#dummy_set_job#job', i) | |
| 411 | + j_order = i | |
| 412 | + j = Job(j_name, j_order, unicode(action, "utf-8")) | |
| 413 | + if not rollback is None: | |
| 414 | + j.rollback_command = unicode(rollback, "utf-8") | |
| 415 | + | |
| 416 | + jg.jobs.append(j) | |
| 417 | + jgs.append(jg) | |
| 418 | + | |
| 419 | + session.add_all(jgs) | |
| 420 | + session.commit() | |
| 421 | + session.close() | |
| 422 | + print >>sys.stdout, 'Insert JobGroup and Job. num=%d [OK]' % number | |
| 423 | + except Exception, e: | |
| 424 | + print >>sys.stderr, 'Failed to add JobGroup and Job.' | |
| 425 | + raise | |
| 426 | + | |
| 427 | + | |
| 428 | + | |
| 307 | 429 | if __name__ == '__main__': |
| 308 | - """Testing | |
| 309 | - """ | |
| 310 | - import pysilhouette.tests.testworker | |
| 311 | - # dev start | |
| 430 | + import Queue | |
| 431 | + request_queue = Queue.Queue() | |
| 432 | + #response_queue = Queue.Queue() | |
| 433 | + response_list = [] | |
| 434 | + tq = ThreadQueue(request_queue, response_list) | |
| 312 | 435 | _env = os.environ |
| 313 | 436 | _env['PYSILHOUETTE_CONF'] = '/etc/opt/pysilhouette/silhouette.conf' |
| 314 | - # dev end | |
| 315 | - | |
| 316 | - # init | |
| 317 | 437 | from pysilhouette.prep import readconf |
| 318 | - cf = readconf(os.environ['PYSILHOUETTE_CONF']) | |
| 438 | + #cf = readconf(os.environ['PYSILHOUETTE_CONF']) | |
| 319 | 439 | pysilhouette.cf = pysilhouette.prep.readconf(os.environ['PYSILHOUETTE_CONF']) |
| 440 | + import pysilhouette | |
| 441 | + import pysilhouette.log | |
| 442 | + import sys | |
| 320 | 443 | pysilhouette.log.reload_conf(pysilhouette.cf["env.sys.log.conf.path"]) |
| 321 | - | |
| 322 | - (db, session, _m_jgs) = pysilhouette.tests.testworker.test_setup(cf) | |
| 323 | - # run | |
| 324 | - for _m_jg in _m_jgs: | |
| 325 | - _w = SimpleWorker(db, _m_jg.id) | |
| 326 | - _w.run() | |
| 327 | - pysilhouette.tests.testworker.worker_debug(db, _m_jg.id) | |
| 328 | - session.close() | |
| 444 | + number = 50 | |
| 445 | + dummy_set_job(pysilhouette.cf,number,'echo "aaaaaa"','echo "bbbbb"','echo "cccc"','parallel') | |
| 446 | + try: | |
| 447 | + db = Database(pysilhouette.cf['database.url'],encoding="utf-8",convert_unicode=True,echo=True,echo_pool=True) | |
| 448 | + reload_mappers(db.get_metadata()) | |
| 449 | + session = db.get_session() | |
| 450 | + except Exception, e: | |
| 451 | + print >>sys.stderr, 'Initializing a database error' | |
| 452 | + raise | |
| 453 | + | |
| 454 | + from pysilhouette.db.access import jobgroup_findbytype_status | |
| 455 | + m_jgs = jobgroup_findbytype_status(session, JOBGROUP_TYPE['PARALLEL']) | |
| 456 | + for m_jg in m_jgs: | |
| 457 | + try: | |
| 458 | + tq.put(ThreadWorker(pysilhouette.cf, db, m_jg.id)) | |
| 459 | + except Exception, e: | |
| 460 | + import traceback | |
| 461 | + print traceback.format_exc() | |
| 462 | + | |
| 463 | + import time | |
| 464 | + time.sleep(2) | |
| 465 | + | |
| 466 | + while True: | |
| 467 | + size = tq.response_clean() | |
| 468 | + print 'ret=' + str(size) | |
| 469 | + if size <= 0: | |
| 470 | + break | |
| 471 | + time.sleep(2) | |
| 472 | + | |
| 473 | + print 'end (request_queue size=%d, response_list=%s)' % (tq.request_queue.qsize(), tq.response_list) |
| @@ -3,7 +3,7 @@ | ||
| 3 | 3 | # |
| 4 | 4 | # This file is part of Pysilhouette. |
| 5 | 5 | # |
| 6 | -# Copyright (c) 2009 HDE, Inc. | |
| 6 | +# Copyright (c) 2009-2010 HDE, Inc. | |
| 7 | 7 | # |
| 8 | 8 | # Permission is hereby granted, free of charge, to any person obtaining a copy |
| 9 | 9 | # of this software and associated documentation files (the "Software"), to deal |
| @@ -1,7 +1,13 @@ | ||
| 1 | 1 | #!/bin/bash |
| 2 | +# | |
| 3 | +# epydoc.sh - The command to generate python API documentation pysilhouette | |
| 4 | +# | |
| 5 | +# require rpm - hde-python-epydoc, hde-graphviz-doc, hde-graphviz, hde-graphviz-devel, hde-graphviz-graphs, hde-graphviz-gd | |
| 6 | +# | |
| 2 | 7 | |
| 3 | 8 | name=pysilhouette |
| 4 | -export PYTHONPATH=${PYTHONPATH}:/opt/pysilhouette/lib/python | |
| 9 | +PATH=${PATH}:/opt/hde/bin | |
| 10 | +export PYTHONPATH=${PYTHONPATH}:/opt/hde/lib/python:/opt/pysilhouette/lib/python | |
| 5 | 11 | |
| 6 | 12 | script_dir=`dirname $0` |
| 7 | 13 | pushd $script_dir >/dev/null 2>&1 |
| @@ -3,7 +3,7 @@ | ||
| 3 | 3 | # |
| 4 | 4 | # This file is part of Pysilhouette. |
| 5 | 5 | # |
| 6 | -# Copyright (c) 2009 HDE, Inc. | |
| 6 | +# Copyright (c) 2009-2010 HDE, Inc. | |
| 7 | 7 | # |
| 8 | 8 | # Permission is hereby granted, free of charge, to any person obtaining a copy |
| 9 | 9 | # of this software and associated documentation files (the "Software"), to deal |
| @@ -3,7 +3,7 @@ | ||
| 3 | 3 | # |
| 4 | 4 | # This file is part of Pysilhouette. |
| 5 | 5 | # |
| 6 | -# Copyright (c) 2009 HDE, Inc. | |
| 6 | +# Copyright (c) 2009-2010 HDE, Inc. | |
| 7 | 7 | # |
| 8 | 8 | # Permission is hereby granted, free of charge, to any person obtaining a copy |
| 9 | 9 | # of this software and associated documentation files (the "Software"), to deal |
| @@ -36,7 +36,7 @@ from optparse import OptionParser | ||
| 36 | 36 | |
| 37 | 37 | from pysilhouette.prep import readconf |
| 38 | 38 | from pysilhouette.db import Database, reload_mappers |
| 39 | -from pysilhouette.db.model import JobGroup, Job | |
| 39 | +from pysilhouette.db.model import JobGroup, Job, JOBGROUP_TYPE | |
| 40 | 40 | from pysilhouette import __version__ |
| 41 | 41 | |
| 42 | 42 | usage = '%prog [options]' |
| @@ -44,11 +44,18 @@ usage = '%prog [options]' | ||
| 44 | 44 | def getopts(): |
| 45 | 45 | optp = OptionParser(usage=usage, version=__version__) |
| 46 | 46 | optp.add_option('-c', '--config', dest='config', help='configuration file') |
| 47 | - optp.add_option('-a', '--action', dest='action', action="store", type='string', help='action command') | |
| 48 | - optp.add_option('-m', '--name', dest='name', action="store", type='string', help='action name') | |
| 49 | - optp.add_option('-r', '--rollback', dest='rollback', action="store", type='string', help='rollback command') | |
| 50 | - optp.add_option('-f', '--finish', dest='finish', action="store", type='string', help='finish command') | |
| 51 | - optp.add_option('-n', '--number', dest='number', action="store", type='int', help='number of repeat job', default=1) | |
| 47 | + optp.add_option('-a', '--action', dest='action', action="store", type='string', | |
| 48 | + help='action command') | |
| 49 | + optp.add_option('-t', '--type', dest='type', action="store", type='string', | |
| 50 | + help='Run type. "serial" or "parallel"') | |
| 51 | + optp.add_option('-m', '--name', dest='name', action="store", type='string', | |
| 52 | + help='action name') | |
| 53 | + optp.add_option('-r', '--rollback', dest='rollback', action="store", type='string', | |
| 54 | + help='rollback command') | |
| 55 | + optp.add_option('-f', '--finish', dest='finish', action="store", type='string', | |
| 56 | + help='finish command') | |
| 57 | + optp.add_option('-n', '--number', dest='number', action="store", type='int', | |
| 58 | + help='Test: Number of repeat job', default=1) | |
| 52 | 59 | |
| 53 | 60 | return optp.parse_args() |
| 54 | 61 |
| @@ -69,6 +76,10 @@ def chkopts(opts): | ||
| 69 | 76 | print >>sys.stderr, '-a or --action option is required.' |
| 70 | 77 | return True |
| 71 | 78 | |
| 79 | + if opts.type is None: | |
| 80 | + print >>sys.stderr, '-t or --type option is required.' | |
| 81 | + return True | |
| 82 | + | |
| 72 | 83 | return False |
| 73 | 84 | |
| 74 | 85 | def main(): |
| @@ -113,6 +124,12 @@ def main(): | ||
| 113 | 124 | jg = JobGroup(jg_name, jg_ukey) |
| 114 | 125 | if not opts.finish is None: |
| 115 | 126 | jg.finish_command = unicode(opts.finish, "utf-8") |
| 127 | + if opts.type == 'serial': | |
| 128 | + jg.type = JOBGROUP_TYPE['SERIAL'] | |
| 129 | + elif opts.type == 'parallel': | |
| 130 | + jg.type = JOBGROUP_TYPE['PARALLEL'] | |
| 131 | + else: | |
| 132 | + jg.type = JOBGROUP_TYPE['SERIAL'] | |
| 116 | 133 | |
| 117 | 134 | j_name = u'%s-%d' % (opts.name, i) |
| 118 | 135 | j_order = i |
| @@ -1,99 +0,0 @@ | ||
| 1 | -]#!/usr/bin/env python | |
| 2 | -# -*- coding: utf-8 -*- | |
| 3 | -# | |
| 4 | -# This file is part of Pysilhouette. | |
| 5 | -# | |
| 6 | -# Copyright (c) 2009 HDE, Inc. | |
| 7 | -# | |
| 8 | -# Permission is hereby granted, free of charge, to any person obtaining a copy | |
| 9 | -# of this software and associated documentation files (the "Software"), to deal | |
| 10 | -# in the Software without restriction, including without limitation the rights | |
| 11 | -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | |
| 12 | -# copies of the Software, and to permit persons to whom the Software is | |
| 13 | -# furnished to do so, subject to the following conditions: | |
| 14 | -# | |
| 15 | -# The above copyright notice and this permission notice shall be included in | |
| 16 | -# all copies or substantial portions of the Software. | |
| 17 | -# | |
| 18 | -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
| 19 | -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
| 20 | -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | |
| 21 | -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |
| 22 | -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | |
| 23 | -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN | |
| 24 | -# THE SOFTWARE. | |
| 25 | -# | |
| 26 | - | |
| 27 | -""" | |
| 28 | -@author: Kei Funagayama <kei.funagayama@hde.co.jp> | |
| 29 | -""" | |
| 30 | - | |
| 31 | -import sys | |
| 32 | -import os | |
| 33 | -import logging | |
| 34 | - | |
| 35 | -from pysilhouette.prep import getopts, readconf, chkopts | |
| 36 | -from pysilhouette.db import Database, reload_mappers | |
| 37 | -from pysilhouette.db.model import JobGroup, Job | |
| 38 | - | |
| 39 | -NUM = 10 | |
| 40 | - | |
| 41 | -def main(): | |
| 42 | - (opts, args) = getopts() | |
| 43 | - if chkopts(opts) is True: | |
| 44 | - return 1 | |
| 45 | - | |
| 46 | - try: | |
| 47 | - opts.config = os.path.abspath(opts.config) | |
| 48 | - except AttributeError, e: | |
| 49 | - print >>sys.stderr, 'No configuration file path.' | |
| 50 | - return 1 | |
| 51 | - | |
| 52 | - cf = readconf(opts.config) | |
| 53 | - if cf is None: | |
| 54 | - print >>sys.stderr, 'Failed to load the config file.' | |
| 55 | - return 1 | |
| 56 | - | |
| 57 | - try: | |
| 58 | - db = Database(cf['database.url'], | |
| 59 | - encoding="utf-8", | |
| 60 | - convert_unicode=True, | |
| 61 | - assert_unicode=False, # product | |
| 62 | - #assert_unicode='warn', # dev | |
| 63 | - #echo = opts.verbose, | |
| 64 | - #echo_pool = opts.verbose, | |
| 65 | - echo=True, | |
| 66 | - echo_pool=True | |
| 67 | - ) | |
| 68 | - | |
| 69 | - reload_mappers(db.get_metadata()) | |
| 70 | - session = db.get_session() | |
| 71 | - | |
| 72 | - except Exception, e: | |
| 73 | - print >>sys.stderr, 'Initializing a database error' | |
| 74 | - raise | |
| 75 | - | |
| 76 | - try: | |
| 77 | - jgs = [] | |
| 78 | - for i in range(NUM): | |
| 79 | - jg_name = u'JobGroup-%d' % i | |
| 80 | - jg_ukey = unicode(cf['env.uniqkey'], "utf-8") | |
| 81 | - j_name = u'Job-%d' % i | |
| 82 | - j_order = i | |
| 83 | - j_cmd = u'/bin/echo num=%d' % i | |
| 84 | - jg = JobGroup(jg_name, jg_ukey) | |
| 85 | - jg.jobs.append( | |
| 86 | - Job(j_name, j_order, j_cmd)) | |
| 87 | - jgs.append(jg) | |
| 88 | - | |
| 89 | - session.add_all(jgs) | |
| 90 | - session.commit() | |
| 91 | - session.close() | |
| 92 | - print >>sys.stdout, 'Insert JobGroup and Job. num=%d [OK]' % NUM | |
| 93 | - except: | |
| 94 | - print >>sys.stderr, 'Failed to add JobGroup and Job.' | |
| 95 | - raise | |
| 96 | - | |
| 97 | -if __name__ == '__main__': | |
| 98 | - sys.exit(main()) | |
| 99 | - |