• R/O
  • SSH
  • HTTPS

metasearch: Commit


Commit MetaInfo

Revision 539 (tree)
Time 2013-05-24 21:18:22
Author whitestar

Log Message

(empty log message)

Change Summary

Incremental Difference

--- hadoop_conf/tags/localhost-2.0/2.0.4a-1/etc/hadoop/hdfs-site.xml (nonexistent)
+++ hadoop_conf/tags/localhost-2.0/2.0.4a-1/etc/hadoop/hdfs-site.xml (revision 539)
@@ -0,0 +1,160 @@
1+<?xml version="1.0" encoding="UTF-8"?>
2+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
3+<!--
4+ Licensed under the Apache License, Version 2.0 (the "License");
5+ you may not use this file except in compliance with the License.
6+ You may obtain a copy of the License at
7+
8+ http://www.apache.org/licenses/LICENSE-2.0
9+
10+ Unless required by applicable law or agreed to in writing, software
11+ distributed under the License is distributed on an "AS IS" BASIS,
12+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13+ See the License for the specific language governing permissions and
14+ limitations under the License. See accompanying LICENSE file.
15+-->
16+
17+<!-- Put site-specific property overrides in this file. -->
18+
19+<configuration>
20+ <property>
21+ <name>this.secondary.namenode.fqdn</name>
22+ <value>localhost</value>
23+ <!-- <value>${this.cluster.name}-cn.${this.domain}</value> -->
24+ </property>
25+
26+ <property>
27+ <name>dfs.namenode.name.dir</name>
28+ <value>file:///grid/vol/0/var/lib/${user.name}/name</value>
29+ <!-- <value>file:///grid/vol/0/var/lib/${user.name}/name,file:///export/home/${user.name}/var/lib/name</value> -->
30+ </property>
31+ <property>
32+ <name>dfs.datanode.data.dir</name>
33+ <value>file:///grid/vol/0/var/lib/${user.name}/data</value>
34+ <!-- <value>file:///grid/vol/0/var/lib/${user.name}/data,file:///grid/vol/1/var/lib/${user.name}/data</value> -->
35+ </property>
36+ <property>
37+ <name>dfs.namenode.checkpoint.dir</name>
38+ <value>file:///grid/vol/0/var/lib/${user.name}/checkpoint</value>
39+ <!-- <value>file:///grid/vol/0/var/lib/${user.name}/checkpoint,file:///export/home/${user.name}/var/lib/checkpoint</value> -->
40+ </property>
41+ <property>
42+ <name>dfs.replication</name>
43+ <value>1</value>
44+ <!-- <value>3</value> -->
45+ </property>
46+
47+ <!--
48+ <property>
49+ <name>dfs.hosts</name>
50+ <value>/grid/usr/hadoop/etc/hadoop/hosts.include</value>
51+ <description>
52+ Names a file that contains a list of hosts that are permitted to connect to the namenode.
53+ The full pathname of the file must be specified. If the value is empty, all hosts are permitted.
54+ </description>
55+ </property>
56+ <property>
57+ <name>dfs.hosts.exclude</name>
58+ <value>/grid/usr/hadoop/etc/hadoop/hosts.exclude</value>
59+ <description>
60+ Names a file that contains a list of hosts that are not permitted to connect to the namenode.
61+ The full pathname of the file must be specified. If the value is empty, no hosts are excluded.
62+ </description>
63+ </property>
64+ -->
65+
66+ <property>
67+ <name>dfs.namenode.kerberos.principal</name>
68+ <value>hdfs/_HOST@${this.realm}</value>
69+ <!-- _HOST is replaced with the fs.defaultFS's host name -->
70+ <!-- <value>hdfs/${this.namenode.fqdn}@${this.realm}</value> -->
71+ <description>Kerberos principal name for the NameNode</description>
72+ </property>
73+ <property>
74+ <name>dfs.namenode.keytab.file</name>
75+ <value>${this.keytab.dir}/nn.keytab</value>
76+ <description>
77+ Combined keytab file containing the namenode service and host
78+ principals.
79+ </description>
80+ </property>
81+ <property>
82+ <name>dfs.secondary.namenode.kerberos.principal</name>
83+ <value>hdfs/${this.secondary.namenode.fqdn}@${this.realm}</value>
84+ <!-- <value>hdfs/_HOST@${this.realm}</value> -->
85+ <description>
86+ Kerberos principal name for the secondary NameNode.
87+ </description>
88+ </property>
89+ <property>
90+ <name>dfs.secondary.namenode.keytab.file</name>
91+ <value>${this.keytab.dir}/cn.keytab</value>
92+ <description>
93+ Combined keytab file containing the namenode service and host
94+ principals.
95+ </description>
96+ </property>
97+ <property>
98+ <name>dfs.block.access.token.enable</name>
99+ <value>true</value>
100+ <description>
101+ If "true", access tokens are used as capabilities for accessing
102+ datanodes.
103+ If "false", no access tokens are checked on accessing datanodes.
104+ </description>
105+ </property>
106+ <property>
107+ <name>dfs.datanode.kerberos.principal</name>
108+ <value>hdfs/localhost@${this.realm}</value>
109+ <!-- <value>hdfs/_HOST@${this.realm}</value> -->
110+ <description>
111+ The Kerberos principal that the DataNode runs as. "_HOST" is
112+ replaced by the real host name.
113+ </description>
114+ </property>
115+ <property>
116+ <name>dfs.datanode.keytab.file</name>
117+ <value>${this.keytab.dir}/dn.keytab</value>
118+ <description>
119+ The filename of the keytab file for the DataNode.
120+ </description>
121+ </property>
122+ <property>
123+ <name>dfs.namenode.kerberos.internal.spnego.principal</name>
124+ <value>${dfs.web.authentication.kerberos.principal}</value>
125+ <!-- <value>HTTP/_HOST@${this.realm}</value> -->
126+ <!-- _HOST is replaced with dfs.namenode.http-address's host name. -->
127+ </property>
128+ <property>
129+ <name>dfs.secondary.namenode.kerberos.internal.spnego.principal</name>
130+ <value>${dfs.web.authentication.kerberos.principal}</value>
131+ <!-- <value>HTTP/_HOST@${this.realm}</value> -->
132+ <!-- _HOST is replaced with dfs.namenode.secondary.http-address's host name. -->
133+ </property>
134+
135+ <property>
136+ <name>dfs.datanode.address</name>
137+ <value>0.0.0.0:1004</value>
138+ </property>
139+ <property>
140+ <name>dfs.datanode.http.address</name>
141+ <value>0.0.0.0:1006</value>
142+ </property>
143+
144+ <property>
145+ <name>dfs.namenode.http-address</name>
146+ <value>${this.namenode.fqdn}:50070</value>
147+ </property>
148+ <property>
149+ <name>dfs.namenode.secondary.http-address</name>
150+ <value>${this.secondary.namenode.fqdn}:50090</value>
151+ </property>
152+ <property>
153+ <name>dfs.web.authentication.kerberos.principal</name>
154+ <value>HTTP/_HOST@${this.realm}</value>
155+ </property>
156+ <property>
157+ <name>dfs.web.authentication.kerberos.keytab</name>
158+ <value>${this.keytab.dir}/HTTP.keytab</value>
159+ </property>
160+</configuration>
--- hadoop_conf/tags/localhost-2.0/2.0.4a-1/etc/hadoop/core-site.xml (nonexistent)
+++ hadoop_conf/tags/localhost-2.0/2.0.4a-1/etc/hadoop/core-site.xml (revision 539)
@@ -0,0 +1,130 @@
1+<?xml version="1.0" encoding="UTF-8"?>
2+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
3+<!--
4+ Licensed under the Apache License, Version 2.0 (the "License");
5+ you may not use this file except in compliance with the License.
6+ You may obtain a copy of the License at
7+
8+ http://www.apache.org/licenses/LICENSE-2.0
9+
10+ Unless required by applicable law or agreed to in writing, software
11+ distributed under the License is distributed on an "AS IS" BASIS,
12+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13+ See the License for the specific language governing permissions and
14+ limitations under the License. See accompanying LICENSE file.
15+-->
16+
17+<!-- Put site-specific property overrides in this file. -->
18+
19+<configuration>
20+ <property>
21+ <name>this.cluster.name</name>
22+ <value>localhost</value>
23+ <!-- <value>pleiades</value> -->
24+ </property>
25+ <property>
26+ <name>this.domain</name>
27+ <value>localhost</value>
28+ <!-- <value>grid.example.com</value> -->
29+ </property>
30+ <property>
31+ <name>this.realm</name>
32+ <value>LOCALDOMAIN</value>
33+ <!-- <value>GRID.EXAMPLE.COM</value> -->
34+ </property>
35+ <property>
36+ <name>this.keytab.dir</name>
37+ <value>/grid/etc/keytabs/${this.cluster.name}</value>
38+ </property>
39+ <property>
40+ <name>this.namenode.fqdn</name>
41+ <value>localhost</value>
42+ <!-- <value>${this.cluster.name}-nn.${this.domain}</value> -->
43+ </property>
44+
45+ <property>
46+ <name>fs.defaultFS</name>
47+ <value>hdfs://localhost:9000</value>
48+ </property>
49+ <property>
50+ <name>hadoop.tmp.dir</name>
51+ <value>/tmp/hadoop-${user.name}</value>
52+ </property>
53+
54+ <property>
55+ <name>hadoop.security.authentication</name>
56+ <value>kerberos</value>
57+ <description>
58+ Set the authentication for the cluster. Valid values are: simple or
59+ kerberos.
60+ </description>
61+ </property>
62+ <property>
63+ <name>hadoop.security.authorization</name>
64+ <value>true</value>
65+ <description>
66+ Enable authorization for different protocols.
67+ </description>
68+ </property>
69+<!--
70+ <property>
71+ <name>hadoop.security.auth_to_local</name>
72+ <value>
73+ RULE:[2:$1@$0](.*@${this.realm})s/@.*//
74+ RULE:[1:$1@$0](.*@${this.realm})s/@.*//
75+ RULE:[2:$1@$0](hdfs@.*${this.realm})s/.*/hdfs/
76+ RULE:[2:$1@$0](yarn@.*${this.realm})s/.*/yarn/
77+ RULE:[2:$1@$0](mapred@.*${this.realm})s/.*/mapred/
78+ DEFAULT
79+ </value>
80+ </property>
81+-->
82+ <property>
83+ <name>hadoop.security.group.mapping</name>
84+ <value>org.apache.hadoop.security.JniBasedUnixGroupsMapping</value>
85+ </property>
86+<!--
87+ <property>
88+ <name>hadoop.security.groups.cache.secs</name>
89+ <value>14400</value>
90+ </property>
91+ <property>
92+ <name>hadoop.kerberos.kinit.command</name>
93+ <value>/usr/bin/kinit</value>
94+ </property>
95+-->
96+
97+ <property>
98+ <name>hadoop.http.filter.initializers</name>
99+ <value>org.apache.hadoop.security.AuthenticationFilterInitializer</value>
100+ <!-- <value>org.apache.hadoop.http.lib.StaticUserWebFilter</value> -->
101+ <description>The name of a class that initializes an input filter for Jetty.
102+ This filter will always return Dr.Who as the web user when the servlets
103+ query for the authenticated user </description>
104+ </property>
105+ <property>
106+ <name>hadoop.http.authentication.signature.secret.file</name>
107+ <value>/grid/etc/hadoop-http-auth-signature-secret</value>
108+ </property>
109+ <property>
110+ <name>hadoop.http.authentication.cookie.domain</name>
111+ <value>${this.domain}</value>
112+ </property>
113+ <property>
114+ <name>hadoop.http.authentication.type</name>
115+ <value>kerberos</value>
116+ <description>Defines authentication used for the HTTP web-consoles.
117+ The supported values are: simple | kerberos | #AUTHENTICATION_HANDLER_CLASSNAME#.
118+ The default value is simple.</description>
119+ </property>
120+ <property>
121+ <name>hadoop.http.authentication.kerberos.principal</name>
122+ <value>HTTP/localhost@${this.realm}</value>
123+ <!-- <value>HTTP/_HOST@${this.realm}</value>
124+ _HOST N/A!: v1.0, HDP1.2; OK: v2.0, CDH3, CDH4 -->
125+ </property>
126+ <property>
127+ <name>hadoop.http.authentication.kerberos.keytab</name>
128+ <value>${this.keytab.dir}/HTTP.keytab</value>
129+ </property>
130+</configuration>
--- hadoop_conf/tags/localhost-2.0/2.0.4a-1/etc/hadoop/capacity-scheduler.xml (nonexistent)
+++ hadoop_conf/tags/localhost-2.0/2.0.4a-1/etc/hadoop/capacity-scheduler.xml (revision 539)
@@ -0,0 +1,111 @@
1+<!--
2+ Licensed under the Apache License, Version 2.0 (the "License");
3+ you may not use this file except in compliance with the License.
4+ You may obtain a copy of the License at
5+
6+ http://www.apache.org/licenses/LICENSE-2.0
7+
8+ Unless required by applicable law or agreed to in writing, software
9+ distributed under the License is distributed on an "AS IS" BASIS,
10+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11+ See the License for the specific language governing permissions and
12+ limitations under the License. See accompanying LICENSE file.
13+-->
14+<configuration>
15+
16+ <property>
17+ <name>yarn.scheduler.capacity.maximum-applications</name>
18+ <value>10000</value>
19+ <description>
20+ Maximum number of applications that can be pending and running.
21+ </description>
22+ </property>
23+
24+ <property>
25+ <name>yarn.scheduler.capacity.maximum-am-resource-percent</name>
26+ <value>0.1</value>
27+ <description>
28+ Maximum percent of resources in the cluster which can be used to run
29+ application masters i.e. controls number of concurrent running
30+ applications.
31+ </description>
32+ </property>
33+
34+ <property>
35+ <name>yarn.scheduler.capacity.resource-calculator</name>
36+ <value>org.apache.hadoop.yarn.server.resourcemanager.resource.DefaultResourceCalculator</value>
37+ <description>
38+ The ResourceCalculator implementation to be used to compare
39+ Resources in the scheduler.
40+ The default i.e. DefaultResourceCalculator only uses Memory while
41+ DominantResourceCalculator uses dominant-resource to compare
42+ multi-dimensional resources such as Memory, CPU etc.
43+ </description>
44+ </property>
45+
46+ <property>
47+ <name>yarn.scheduler.capacity.root.queues</name>
48+ <value>default</value>
49+ <description>
50+ The queues at this level (root is the root queue).
51+ </description>
52+ </property>
53+
54+ <property>
55+ <name>yarn.scheduler.capacity.root.default.capacity</name>
56+ <value>100</value>
57+ <description>Default queue target capacity.</description>
58+ </property>
59+
60+ <property>
61+ <name>yarn.scheduler.capacity.root.default.user-limit-factor</name>
62+ <value>1</value>
63+ <description>
64+ Default queue user limit a percentage from 0.0 to 1.0.
65+ </description>
66+ </property>
67+
68+ <property>
69+ <name>yarn.scheduler.capacity.root.default.maximum-capacity</name>
70+ <value>100</value>
71+ <description>
72+ The maximum capacity of the default queue.
73+ </description>
74+ </property>
75+
76+ <property>
77+ <name>yarn.scheduler.capacity.root.default.state</name>
78+ <value>RUNNING</value>
79+ <description>
80+ The state of the default queue. State can be one of RUNNING or STOPPED.
81+ </description>
82+ </property>
83+
84+ <property>
85+ <name>yarn.scheduler.capacity.root.default.acl_submit_applications</name>
86+ <value>*</value>
87+ <description>
88+ The ACL of who can submit jobs to the default queue.
89+ </description>
90+ </property>
91+
92+ <property>
93+ <name>yarn.scheduler.capacity.root.default.acl_administer_queue</name>
94+ <value> hadoop,gridops</value>
95+ <description>
96+ The ACL of who can administer jobs on the default queue.
97+ </description>
98+ </property>
99+
100+ <property>
101+ <name>yarn.scheduler.capacity.node-locality-delay</name>
102+ <value>-1</value>
103+ <description>
104+ Number of missed scheduling opportunities after which the CapacityScheduler
105+ attempts to schedule rack-local containers.
106+ Typically this should be set to number of racks in the cluster, this
107+ feature is disabled by default, set to -1.
108+ </description>
109+ </property>
110+
111+</configuration>
--- hadoop_conf/tags/localhost-2.0/2.0.4a-1/etc/hadoop/hadoop-env.sh (nonexistent)
+++ hadoop_conf/tags/localhost-2.0/2.0.4a-1/etc/hadoop/hadoop-env.sh (revision 539)
@@ -0,0 +1,114 @@
1+# Copyright 2011 The Apache Software Foundation
2+#
3+# Licensed to the Apache Software Foundation (ASF) under one
4+# or more contributor license agreements. See the NOTICE file
5+# distributed with this work for additional information
6+# regarding copyright ownership. The ASF licenses this file
7+# to you under the Apache License, Version 2.0 (the
8+# "License"); you may not use this file except in compliance
9+# with the License. You may obtain a copy of the License at
10+#
11+# http://www.apache.org/licenses/LICENSE-2.0
12+#
13+# Unless required by applicable law or agreed to in writing, software
14+# distributed under the License is distributed on an "AS IS" BASIS,
15+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16+# See the License for the specific language governing permissions and
17+# limitations under the License.
18+
19+# Set Hadoop-specific environment variables here.
20+
21+
22+#export JAVA_HOME=/usr/lib/jvm/java-6-openjdk
23+#export JAVA_HOME=/usr/lib/jvm/java-6-openjdk-i386
24+#export JAVA_HOME=/usr/lib/jvm/java-6-openjdk-amd64
25+export JAVA_HOME=/usr/local/jvm/java-6-ora
26+#export JAVA_HOME=/usr/lib/jvm/java-7-openjdk-i386
27+#export JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64
28+#export JAVA_HOME=/usr/local/jvm/java-7-ora
29+# The directory where pid files are stored. /tmp by default.
30+export HADOOP_PID_DIR=/grid/vol/0/var/run/${USER}
31+# Where log files are stored. $HADOOP_PREFIX/logs by default.
32+#export HADOOP_LOG_DIR=/grid/vol/0/var/log/${USER}
33+if [ x"$USER" = x'root' ]; then
34+ export HADOOP_LOG_DIR=/grid/vol/0/var/log/hdfs
35+else
36+ export HADOOP_LOG_DIR=/grid/vol/0/var/log/${USER}
37+fi
38+
39+export HADOOP_SECURE_DN_USER=hdfs
40+# This property is N/A or overridden by the HADOOP_PID_DIR
41+#export HADOOP_SECURE_DN_PID_DIR=/grid/vol/0/var/run/${HADOOP_SECURE_DN_USER}
42+# This property is N/A or overridden by the HADOOP_LOG_DIR
43+#export HADOOP_SECURE_DN_LOG_DIR=/grid/vol/0/var/log/${HADOOP_SECURE_DN_USER}
44+export JSVC_HOME=/usr/bin
45+#export JSVC_HOME=/grid/usr/hadoop/sbin
46+# Extra Java CLASSPATH elements. Optional.
47+if [ x"$HADOOP_CLASSPATH" = x ]; then
48+ export HADOOP_CLASSPATH=/usr/share/java/commons-daemon.jar
49+ #export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/grid/usr/commons-daemon-1.0.13/commons-daemon-1.0.13.jar
50+else
51+ # for Hive and HCatalog
52+ export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/usr/share/java/commons-daemon.jar
53+ #export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/grid/usr/commons-daemon-1.0.13/commons-daemon-1.0.13.jar
54+fi
55+export HADOOP_USER_CLASSPATH_FIRST=true
56+
57+
58+# The only required environment variable is JAVA_HOME. All others are
59+# optional. When running a distributed configuration it is best to
60+# set JAVA_HOME in this file, so that it is correctly defined on
61+# remote nodes.
62+
63+# The java implementation to use.
64+export JAVA_HOME=${JAVA_HOME}
65+
66+# The jsvc implementation to use. Jsvc is required to run secure datanodes.
67+#export JSVC_HOME=${JSVC_HOME}
68+
69+export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-"/etc/hadoop"}
70+
71+# Extra Java CLASSPATH elements. Automatically insert capacity-scheduler.
72+for f in $HADOOP_HOME/contrib/capacity-scheduler/*.jar; do
73+ if [ "$HADOOP_CLASSPATH" ]; then
74+ export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:$f
75+ else
76+ export HADOOP_CLASSPATH=$f
77+ fi
78+done
79+
80+# The maximum amount of heap to use, in MB. Default is 1000.
81+#export HADOOP_HEAPSIZE=
82+#export HADOOP_NAMENODE_INIT_HEAPSIZE=""
83+
84+# Extra Java runtime options. Empty by default.
85+export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true $HADOOP_CLIENT_OPTS"
86+
87+# Command specific options appended to HADOOP_OPTS when specified
88+export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_NAMENODE_OPTS"
89+export HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS $HADOOP_DATANODE_OPTS"
90+
91+export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_SECONDARYNAMENODE_OPTS"
92+
93+# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
94+export HADOOP_CLIENT_OPTS="-Xmx128m $HADOOP_CLIENT_OPTS"
95+#HADOOP_JAVA_PLATFORM_OPTS="-XX:-UsePerfData $HADOOP_JAVA_PLATFORM_OPTS"
96+
97+# On secure datanodes, user to run the datanode as after dropping privileges
98+export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER}
99+
100+# Where log files are stored. $HADOOP_HOME/logs by default.
101+#export HADOOP_LOG_DIR=${HADOOP_LOG_DIR}/$USER
102+
103+# Where log files are stored in the secure data environment.
104+export HADOOP_SECURE_DN_LOG_DIR=${HADOOP_LOG_DIR}/${HADOOP_HDFS_USER}
105+
106+# The directory where pid files are stored. /tmp by default.
107+# NOTE: this should be set to a directory that can only be written to by
108+# the user that will run the hadoop daemons. Otherwise there is the
109+# potential for a symlink attack.
110+export HADOOP_PID_DIR=${HADOOP_PID_DIR}
111+export HADOOP_SECURE_DN_PID_DIR=${HADOOP_PID_DIR}
112+
113+# A string representing this instance of hadoop. $USER by default.
114+export HADOOP_IDENT_STRING=$USER
--- hadoop_conf/tags/localhost-2.0/2.0.4a-1/etc/hadoop/mapred-site.xml (nonexistent)
+++ hadoop_conf/tags/localhost-2.0/2.0.4a-1/etc/hadoop/mapred-site.xml (revision 539)
@@ -0,0 +1,57 @@
1+<?xml version="1.0"?>
2+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
3+<!--
4+ Licensed under the Apache License, Version 2.0 (the "License");
5+ you may not use this file except in compliance with the License.
6+ You may obtain a copy of the License at
7+
8+ http://www.apache.org/licenses/LICENSE-2.0
9+
10+ Unless required by applicable law or agreed to in writing, software
11+ distributed under the License is distributed on an "AS IS" BASIS,
12+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13+ See the License for the specific language governing permissions and
14+ limitations under the License. See accompanying LICENSE file.
15+-->
16+
17+<!-- Put site-specific property overrides in this file. -->
18+
19+<configuration>
20+ <property>
21+ <name>this.jobhistory.fqdn</name>
22+ <value>localhost</value>
23+ <!-- <value>${this.cluster.name}-jh.${this.domain}</value> -->
24+ </property>
25+
26+ <property>
27+ <name>mapreduce.framework.name</name>
28+ <value>yarn</value>
29+ <description>The runtime framework for executing MapReduce jobs.
30+ Can be one of local, classic or yarn.
31+ (default: local)
32+ </description>
33+ </property>
34+ <property>
35+ <name>yarn.app.mapreduce.am.staging-dir</name>
36+ <value>/user</value>
37+ </property>
38+ <property>
39+ <name>mapreduce.jobhistory.intermediate-done-dir</name>
40+ <value>/grid/vol/0/var/lib/mapred/history/done_intermediate</value>
41+ <!-- NG: <value>/user</value> -->
42+ </property>
43+ <property>
44+ <name>mapreduce.jobhistory.done-dir</name>
45+ <value>/grid/vol/0/var/lib/mapred/history/done</value>
46+ </property>
47+
48+ <property>
49+ <name>mapreduce.jobhistory.principal</name>
50+ <value>mapred/${this.jobhistory.fqdn}@${this.realm}</value>
51+ <!-- <value>mapred/_HOST@${this.realm}</value> -->
52+ </property>
53+ <property>
54+ <name>mapreduce.jobhistory.keytab</name>
55+ <value>${this.keytab.dir}/jh.keytab</value>
56+ </property>
57+</configuration>
--- hadoop_conf/tags/localhost-2.0/2.0.4a-1/etc/hadoop/yarn-site.xml (nonexistent)
+++ hadoop_conf/tags/localhost-2.0/2.0.4a-1/etc/hadoop/yarn-site.xml (revision 539)
@@ -0,0 +1,156 @@
1+<?xml version="1.0"?>
2+<!--
3+ Licensed under the Apache License, Version 2.0 (the "License");
4+ you may not use this file except in compliance with the License.
5+ You may obtain a copy of the License at
6+
7+ http://www.apache.org/licenses/LICENSE-2.0
8+
9+ Unless required by applicable law or agreed to in writing, software
10+ distributed under the License is distributed on an "AS IS" BASIS,
11+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+ See the License for the specific language governing permissions and
13+ limitations under the License. See accompanying LICENSE file.
14+-->
15+<configuration>
16+
17+<!-- Site specific YARN configuration properties -->
18+ <property>
19+ <name>this.resourcemanager.fqdn</name>
20+ <value>localhost</value>
21+ <!-- <value>${this.cluster.name}-rm.${this.domain}</value> -->
22+ </property>
23+
24+ <property>
25+ <name>yarn.log-aggregation-enable</name>
26+ <value>true</value>
27+ </property>
28+ <property>
29+ <name>yarn.resourcemanager.scheduler.class</name>
30+ <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler</value>
31+ <description>In case you do not want to use the default scheduler</description>
32+ </property>
33+ <property>
34+ <name>yarn.nodemanager.local-dirs</name>
35+ <value>/grid/vol/0/var/lib/${user.name}/nm/local</value>
36+ <!-- <value>/grid/vol/0/var/lib/${user.name}/nm/local,/grid/vol/1/var/lib/${user.name}/nm/local</value> -->
37+ <description>the local directories used by the nodemanager
38+ (default: /tmp/nm-local-dir)</description>
39+ </property>
40+ <property>
41+ <name>yarn.nodemanager.resource.memory-mb</name>
42+ <value>8192</value>
43+ <description>Amount of physical memory, in MB, that can be allocated
44+ for containers. (default: 8192)</description>
45+ </property>
46+ <property>
47+ <name>yarn.nodemanager.resource.cpu-cores</name>
48+ <value>8</value>
49+ <description>Number of CPU cores that can be allocated
50+ for containers. (default: 8)</description>
51+ </property>
52+ <property>
53+ <name>yarn.nodemanager.remote-app-log-dir</name>
54+ <value>/grid/vol/0/var/log/${user.name}/nm</value>
55+ <description>directory on hdfs where the application logs are moved to
56+ (default: /tmp/logs)</description>
57+ </property>
58+ <property>
59+ <name>yarn.nodemanager.log-dirs</name>
60+ <value>/grid/vol/0/var/log/${user.name}/nm</value>
61+ <!-- <value>/grid/vol/0/var/log/${user.name}/nm,/grid/vol/1/var/log/${user.name}/nm</value> -->
62+ <description>the directories used by Nodemanagers as log directories
63+ (default: /tmp/logs)</description>
64+ </property>
65+ <property>
66+ <name>yarn.nodemanager.aux-services</name>
67+ <value>mapreduce.shuffle</value>
68+ <description>shuffle service that needs to be set for Map Reduce to run</description>
69+ </property>
70+
71+ <!--
72+ <property>
73+ <name>yarn.resourcemanager.nodes.include-path</name>
74+ <value>/grid/usr/hadoop/etc/hadoop/hosts.include</value>
75+ <description>Path to file with nodes to include.</description>
76+ </property>
77+ <property>
78+ <name>yarn.resourcemanager.nodes.exclude-path</name>
79+ <value>/grid/usr/hadoop/etc/hadoop/hosts.exclude</value>
80+ <description>Path to file with nodes to exclude.</description>
81+ </property>
82+ -->
83+
84+ <property>
85+ <name>yarn.acl.enable</name>
86+ <value>true</value>
87+ </property>
88+ <property>
89+ <name>yarn.admin.acl</name>
90+ <value> yarn,gridops</value>
91+ </property>
92+ <property>
93+ <name>yarn.resourcemanager.principal</name>
94+ <value>yarn/${this.resourcemanager.fqdn}@${this.realm}</value>
95+ <!-- <value>yarn/_HOST@${this.realm}</value> -->
96+ </property>
97+ <property>
98+ <name>yarn.resourcemanager.keytab</name>
99+ <value>${this.keytab.dir}/rm.keytab</value>
100+ </property>
101+ <property>
102+ <name>yarn.nodemanager.principal</name>
103+ <value>yarn/localhost@${this.realm}</value>
104+ <!-- <value>yarn/_HOST@${this.realm}</value> -->
105+ </property>
106+ <property>
107+ <name>yarn.nodemanager.keytab</name>
108+ <value>${this.keytab.dir}/nm.keytab</value>
109+ </property>
110+ <property>
111+ <name>yarn.nodemanager.admin-env</name>
112+ <value>MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX,LD_LIBRARY_PATH=${HADOOP_COMMON_HOME}/lib/native</value>
113+ </property>
114+
115+ <property>
116+ <name>yarn.nodemanager.container-executor.class</name>
117+ <value>org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor</value>
118+ </property>
119+ <property>
120+ <name>yarn.nodemanager.linux-container-executor.group</name>
121+ <value>yarn</value>
122+ </property>
123+ <property>
124+ <name>yarn.nodemanager.linux-container-executor.resources-handler.class</name>
125+ <!--
126+ <value>org.apache.hadoop.yarn.server.nodemanager.util.DefaultLCEResourcesHandler</value>
127+ -->
128+ <value>org.apache.hadoop.yarn.server.nodemanager.util.CgroupsLCEResourcesHandler</value>
129+ <description>The class which should help the LCE handle resources.</description>
130+ </property>
131+ <!--
132+ <property>
133+ <name>yarn.nodemanager.linux-container-executor.cgroups.hierarchy</name>
134+ <value>/hadoop-yarn</value>
135+ <description>The cgroups hierarchy under which to place YARN processes (cannot contain commas).
136+ If yarn.nodemanager.linux-container-executor.cgroups.mount is false (that is, if cgroups have
137+ been pre-configured), then this cgroups hierarchy must already exist and be writable by the
138+ NodeManager user, otherwise the NodeManager may fail.
139+ Only used when the LCE resources handler is set to the CgroupsLCEResourcesHandler.</description>
140+ </property>
141+ <property>
142+ <name>yarn.nodemanager.linux-container-executor.cgroups.mount</name>
143+ <value>false</value>
144+ <description>Whether the LCE should attempt to mount cgroups if not found.
145+ Only used when the LCE resources handler is set to the CgroupsLCEResourcesHandler.</description>
146+ </property>
147+ <property>
148+ <name>yarn.nodemanager.linux-container-executor.cgroups.mount-path</name>
149+ <description>Where the LCE should attempt to mount cgroups if not found. Common locations
150+ include /sys/fs/cgroup and /cgroup; the default location can vary depending on the Linux
151+ distribution in use. This path must exist before the NodeManager is launched.
152+ Only used when the LCE resources handler is set to the CgroupsLCEResourcesHandler, and
153+ yarn.nodemanager.linux-container-executor.cgroups.mount is true.</description>
154+ </property>
155+ -->
156+</configuration>
--- hadoop_conf/tags/localhost-2.0/2.0.4a-1/etc/hadoop/hadoop-policy.xml (nonexistent)
+++ hadoop_conf/tags/localhost-2.0/2.0.4a-1/etc/hadoop/hadoop-policy.xml (revision 539)
@@ -0,0 +1,219 @@
1+<?xml version="1.0"?>
2+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
3+<!--
4+
5+ Copyright 2011 The Apache Software Foundation
6+
7+ Licensed to the Apache Software Foundation (ASF) under one
8+ or more contributor license agreements. See the NOTICE file
9+ distributed with this work for additional information
10+ regarding copyright ownership. The ASF licenses this file
11+ to you under the Apache License, Version 2.0 (the
12+ "License"); you may not use this file except in compliance
13+ with the License. You may obtain a copy of the License at
14+
15+ http://www.apache.org/licenses/LICENSE-2.0
16+
17+ Unless required by applicable law or agreed to in writing, software
18+ distributed under the License is distributed on an "AS IS" BASIS,
19+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
20+ See the License for the specific language governing permissions and
21+ limitations under the License.
22+
23+-->
24+
25+<!-- Put site-specific property overrides in this file. -->
26+
27+<configuration>
28+ <property>
29+ <name>security.client.protocol.acl</name>
30+ <value>*</value>
31+ <description>ACL for ClientProtocol, which is used by user code
32+ via the DistributedFileSystem.
33+ The ACL is a comma-separated list of user and group names. The user and
34+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
35+ A special value of "*" means all users are allowed.</description>
36+ </property>
37+
38+ <property>
39+ <name>security.client.datanode.protocol.acl</name>
40+ <value>*</value>
41+ <description>ACL for ClientDatanodeProtocol, the client-to-datanode protocol
42+ for block recovery.
43+ The ACL is a comma-separated list of user and group names. The user and
44+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
45+ A special value of "*" means all users are allowed.</description>
46+ </property>
47+
48+ <property>
49+ <name>security.datanode.protocol.acl</name>
50+ <value>*</value>
51+ <description>ACL for DatanodeProtocol, which is used by datanodes to
52+ communicate with the namenode.
53+ The ACL is a comma-separated list of user and group names. The user and
54+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
55+ A special value of "*" means all users are allowed.</description>
56+ </property>
57+
58+ <property>
59+ <name>security.inter.datanode.protocol.acl</name>
60+ <value>*</value>
61+ <description>ACL for InterDatanodeProtocol, the inter-datanode protocol
62+ for updating generation timestamp.
63+ The ACL is a comma-separated list of user and group names. The user and
64+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
65+ A special value of "*" means all users are allowed.</description>
66+ </property>
67+
68+ <property>
69+ <name>security.namenode.protocol.acl</name>
70+ <value>*</value>
71+ <description>ACL for NamenodeProtocol, the protocol used by the secondary
72+ namenode to communicate with the namenode.
73+ The ACL is a comma-separated list of user and group names. The user and
74+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
75+ A special value of "*" means all users are allowed.</description>
76+ </property>
77+
78+ <property>
79+ <name>security.admin.operations.protocol.acl</name>
80+ <value>*</value>
81+ <description>ACL for AdminOperationsProtocol. Used for admin commands.
82+ The ACL is a comma-separated list of user and group names. The user and
83+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
84+ A special value of "*" means all users are allowed.</description>
85+ </property>
86+
87+ <property>
88+ <name>security.refresh.usertogroups.mappings.protocol.acl</name>
89+ <value>*</value>
90+ <description>ACL for RefreshUserMappingsProtocol. Used to refresh
91+ users mappings. The ACL is a comma-separated list of user and
92+ group names. The user and group list is separated by a blank. For
93+ e.g. "alice,bob users,wheel". A special value of "*" means all
94+ users are allowed.</description>
95+ </property>
96+
97+ <property>
98+ <name>security.refresh.policy.protocol.acl</name>
99+ <value>*</value>
100+ <description>ACL for RefreshAuthorizationPolicyProtocol, used by the
101+ dfsadmin and mradmin commands to refresh the security policy in-effect.
102+ The ACL is a comma-separated list of user and group names. The user and
103+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
104+ A special value of "*" means all users are allowed.</description>
105+ </property>
106+
107+ <property>
108+ <name>security.ha.service.protocol.acl</name>
109+ <value>*</value>
110+ <description>ACL for HAService protocol used by HAAdmin to manage the
111+ active and stand-by states of namenode.</description>
112+ </property>
113+
114+ <property>
115+ <name>security.zkfc.protocol.acl</name>
116+ <value>*</value>
117+ <description>ACL for access to the ZK Failover Controller
118+ </description>
119+ </property>
120+
121+ <property>
122+ <name>security.qjournal.service.protocol.acl</name>
123+ <value>*</value>
124+ <description>ACL for QJournalProtocol, used by the NN to communicate with
125+ JNs when using the QuorumJournalManager for edit logs.</description>
126+ </property>
127+
128+ <property>
129+ <name>security.mrhs.client.protocol.acl</name>
130+ <value>*</value>
131+ <description>ACL for HSClientProtocol, used by job clients to
132+    communicate with the MR History Server to query job status etc.
133+ The ACL is a comma-separated list of user and group names. The user and
134+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
135+ A special value of "*" means all users are allowed.</description>
136+ </property>
137+
138+ <!-- YARN Protocols -->
139+
140+ <property>
141+ <name>security.resourcetracker.protocol.acl</name>
142+ <value>*</value>
143+ <description>ACL for ResourceTracker protocol, used by the
144+ ResourceManager and NodeManager to communicate with each other.
145+ The ACL is a comma-separated list of user and group names. The user and
146+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
147+ A special value of "*" means all users are allowed.</description>
148+ </property>
149+
150+ <property>
151+ <name>security.admin.protocol.acl</name>
152+ <value>*</value>
153+ <description>ACL for RMAdminProtocol, for admin commands.
154+ The ACL is a comma-separated list of user and group names. The user and
155+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
156+ A special value of "*" means all users are allowed.</description>
157+ </property>
158+
159+ <property>
160+ <name>security.client.resourcemanager.protocol.acl</name>
161+ <value>*</value>
162+ <description>ACL for ClientRMProtocol, used by the ResourceManager
163+ and applications submission clients to communicate with each other.
164+ The ACL is a comma-separated list of user and group names. The user and
165+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
166+ A special value of "*" means all users are allowed.</description>
167+ </property>
168+
169+ <property>
170+ <name>security.applicationmaster.resourcemanager.protocol.acl</name>
171+ <value>*</value>
172+ <description>ACL for AMRMProtocol, used by the ResourceManager
173+ and ApplicationMasters to communicate with each other.
174+ The ACL is a comma-separated list of user and group names. The user and
175+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
176+ A special value of "*" means all users are allowed.</description>
177+ </property>
178+
179+ <property>
180+ <name>security.containermanager.protocol.acl</name>
181+ <value>*</value>
182+ <description>ACL for ContainerManager protocol, used by the NodeManager
183+ and ApplicationMasters to communicate with each other.
184+ The ACL is a comma-separated list of user and group names. The user and
185+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
186+ A special value of "*" means all users are allowed.</description>
187+ </property>
188+
189+ <property>
190+ <name>security.resourcelocalizer.protocol.acl</name>
191+ <value>*</value>
192+ <description>ACL for ResourceLocalizer protocol, used by the NodeManager
193+ and ResourceLocalizer to communicate with each other.
194+ The ACL is a comma-separated list of user and group names. The user and
195+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
196+ A special value of "*" means all users are allowed.</description>
197+ </property>
198+
199+ <property>
200+ <name>security.job.task.protocol.acl</name>
201+ <value>*</value>
202+ <description>ACL for TaskUmbilicalProtocol, used by the map and reduce
203+ tasks to communicate with the parent tasktracker.
204+ The ACL is a comma-separated list of user and group names. The user and
205+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
206+ A special value of "*" means all users are allowed.</description>
207+ </property>
208+
209+ <property>
210+ <name>security.job.client.protocol.acl</name>
211+ <value>*</value>
212+ <description>ACL for MRClientProtocol, used by job clients to
213+    communicate with the MR ApplicationMaster to query job status etc.
214+ The ACL is a comma-separated list of user and group names. The user and
215+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
216+ A special value of "*" means all users are allowed.</description>
217+ </property>
218+
219+</configuration>
--- hadoop_conf/tags/localhost-2.0/2.0.4a-1/etc/hadoop/mapred-env.sh (nonexistent)
+++ hadoop_conf/tags/localhost-2.0/2.0.4a-1/etc/hadoop/mapred-env.sh (revision 539)
@@ -0,0 +1,42 @@
1+# Licensed to the Apache Software Foundation (ASF) under one or more
2+# contributor license agreements. See the NOTICE file distributed with
3+# this work for additional information regarding copyright ownership.
4+# The ASF licenses this file to You under the Apache License, Version 2.0
5+# (the "License"); you may not use this file except in compliance with
6+# the License. You may obtain a copy of the License at
7+#
8+# http://www.apache.org/licenses/LICENSE-2.0
9+#
10+# Unless required by applicable law or agreed to in writing, software
11+# distributed under the License is distributed on an "AS IS" BASIS,
12+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13+# See the License for the specific language governing permissions and
14+# limitations under the License.
15+
16+
17+#export JAVA_HOME=/usr/lib/jvm/java-6-openjdk
18+#export JAVA_HOME=/usr/lib/jvm/java-6-openjdk-i386
19+#export JAVA_HOME=/usr/lib/jvm/java-6-openjdk-amd64
20+export JAVA_HOME=/usr/local/jvm/java-6-ora
21+#export JAVA_HOME=/usr/lib/jvm/java-7-openjdk-i386
22+#export JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64
23+#export JAVA_HOME=/usr/local/jvm/java-7-ora
24+
25+# The directory where pid files are stored. /tmp by default.
26+export HADOOP_MAPRED_PID_DIR=/grid/vol/0/var/run/${USER}
27+# Where log files are stored. $HADOOP_MAPRED_HOME/logs by default.
28+export HADOOP_MAPRED_LOG_DIR=/grid/vol/0/var/log/${USER}
29+
30+
31+# export JAVA_HOME=/home/y/libexec/jdk1.6.0/
32+
33+export HADOOP_JOB_HISTORYSERVER_HEAPSIZE=1000
34+
35+export HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA
36+
37+#export HADOOP_JOB_HISTORYSERVER_OPTS=
38+#export HADOOP_MAPRED_LOG_DIR="" # Where log files are stored. $HADOOP_MAPRED_HOME/logs by default.
39+#export HADOOP_JHS_LOGGER=INFO,RFA # Hadoop JobSummary logger.
40+#export HADOOP_MAPRED_PID_DIR= # The pid files are stored. /tmp by default.
41+#export HADOOP_MAPRED_IDENT_STRING= #A string representing this instance of hadoop. $USER by default
42+#export HADOOP_MAPRED_NICENESS= #The scheduling priority for daemons. Defaults to 0.
--- hadoop_conf/tags/localhost-2.0/2.0.4a-1/etc/hadoop/yarn-env.sh (nonexistent)
+++ hadoop_conf/tags/localhost-2.0/2.0.4a-1/etc/hadoop/yarn-env.sh (revision 539)
@@ -0,0 +1,99 @@
1+# Licensed to the Apache Software Foundation (ASF) under one or more
2+# contributor license agreements. See the NOTICE file distributed with
3+# this work for additional information regarding copyright ownership.
4+# The ASF licenses this file to You under the Apache License, Version 2.0
5+# (the "License"); you may not use this file except in compliance with
6+# the License. You may obtain a copy of the License at
7+#
8+# http://www.apache.org/licenses/LICENSE-2.0
9+#
10+# Unless required by applicable law or agreed to in writing, software
11+# distributed under the License is distributed on an "AS IS" BASIS,
12+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13+# See the License for the specific language governing permissions and
14+# limitations under the License.
15+
16+
17+#export JAVA_HOME=/usr/lib/jvm/java-6-openjdk
18+#export JAVA_HOME=/usr/lib/jvm/java-6-openjdk-i386
19+#export JAVA_HOME=/usr/lib/jvm/java-6-openjdk-amd64
20+export JAVA_HOME=/usr/local/jvm/java-6-ora
21+#export JAVA_HOME=/usr/lib/jvm/java-7-openjdk-i386
22+#export JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64
23+#export JAVA_HOME=/usr/local/jvm/java-7-ora
24+
25+export HADOOP_PREFIX=/grid/usr/hadoop
26+#export HADOOP_CONF_DIR=${HADOOP_PREFIX}/etc/hadoop
27+export HADOOP_COMMON_HOME=${HADOOP_PREFIX}
28+export HADOOP_HDFS_HOME=${HADOOP_PREFIX}
29+
30+export YARN_HOME=${HADOOP_PREFIX}
31+export YARN_PID_DIR=/grid/vol/0/var/run/${USER}
32+export YARN_LOG_DIR=/grid/vol/0/var/log/${USER}
33+
34+export HADOOP_MAPRED_HOME=${HADOOP_PREFIX}
35+
36+
37+# User for YARN daemons
38+export HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn}
39+
40+# resolve links - $0 may be a softlink
41+export YARN_CONF_DIR="${YARN_CONF_DIR:-$HADOOP_YARN_HOME/conf}"
42+
43+# some Java parameters
44+# export JAVA_HOME=/home/y/libexec/jdk1.6.0/
45+if [ "$JAVA_HOME" != "" ]; then
46+ #echo "run java in $JAVA_HOME"
47+ JAVA_HOME=$JAVA_HOME
48+fi
49+
50+if [ "$JAVA_HOME" = "" ]; then
51+ echo "Error: JAVA_HOME is not set."
52+ exit 1
53+fi
54+
55+JAVA=$JAVA_HOME/bin/java
56+JAVA_HEAP_MAX=-Xmx1000m
57+
58+# check envvars which might override default args
59+if [ "$YARN_HEAPSIZE" != "" ]; then
60+ #echo "run with heapsize $YARN_HEAPSIZE"
61+ JAVA_HEAP_MAX="-Xmx""$YARN_HEAPSIZE""m"
62+ #echo $JAVA_HEAP_MAX
63+fi
64+
65+# so that filenames w/ spaces are handled correctly in loops below
66+IFS=
67+
68+
69+# default log directory & file
70+if [ "$YARN_LOG_DIR" = "" ]; then
71+ YARN_LOG_DIR="$HADOOP_YARN_HOME/logs"
72+fi
73+if [ "$YARN_LOGFILE" = "" ]; then
74+ YARN_LOGFILE='yarn.log'
75+fi
76+
77+# default policy file for service-level authorization
78+if [ "$YARN_POLICYFILE" = "" ]; then
79+ YARN_POLICYFILE="hadoop-policy.xml"
80+fi
81+
82+# restore ordinary behaviour
83+unset IFS
84+
85+
86+YARN_OPTS="$YARN_OPTS -Dhadoop.log.dir=$YARN_LOG_DIR"
87+YARN_OPTS="$YARN_OPTS -Dyarn.log.dir=$YARN_LOG_DIR"
88+YARN_OPTS="$YARN_OPTS -Dhadoop.log.file=$YARN_LOGFILE"
89+YARN_OPTS="$YARN_OPTS -Dyarn.log.file=$YARN_LOGFILE"
90+YARN_OPTS="$YARN_OPTS -Dyarn.home.dir=$YARN_COMMON_HOME"
91+YARN_OPTS="$YARN_OPTS -Dyarn.id.str=$YARN_IDENT_STRING"
92+YARN_OPTS="$YARN_OPTS -Dhadoop.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
93+YARN_OPTS="$YARN_OPTS -Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
94+if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
95+ YARN_OPTS="$YARN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH"
96+fi
97+YARN_OPTS="$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE"
98+
99+
--- hadoop_conf/tags/localhost-2.0/2.0.4a-1/etc/hadoop/httpfs-env.sh (nonexistent)
+++ hadoop_conf/tags/localhost-2.0/2.0.4a-1/etc/hadoop/httpfs-env.sh (revision 539)
@@ -0,0 +1,41 @@
1+#!/bin/bash
2+#
3+# Licensed under the Apache License, Version 2.0 (the "License");
4+# you may not use this file except in compliance with the License.
5+# You may obtain a copy of the License at
6+#
7+# http://www.apache.org/licenses/LICENSE-2.0
8+#
9+# Unless required by applicable law or agreed to in writing, software
10+# distributed under the License is distributed on an "AS IS" BASIS,
11+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+# See the License for the specific language governing permissions and
13+# limitations under the License. See accompanying LICENSE file.
14+#
15+
16+# Set httpfs specific environment variables here.
17+
18+# Settings for the Embedded Tomcat that runs HttpFS
19+# Java System properties for HttpFS should be specified in this variable
20+#
21+# export CATALINA_OPTS=
22+
23+# HttpFS logs directory
24+#
25+# export HTTPFS_LOG=${HTTPFS_HOME}/logs
26+
27+# HttpFS temporary directory
28+#
29+# export HTTPFS_TEMP=${HTTPFS_HOME}/temp
30+
31+# The HTTP port used by HttpFS
32+#
33+# export HTTPFS_HTTP_PORT=14000
34+
35+# The Admin port used by HttpFS
36+#
37+# export HTTPFS_ADMIN_PORT=`expr ${HTTPFS_HTTP_PORT} + 1`
38+
39+# The hostname HttpFS server runs on
40+#
41+# export HTTPFS_HTTP_HOSTNAME=`hostname -f`
--- hadoop_conf/tags/localhost-2.0/2.0.4a-1/etc/hadoop/httpfs-site.xml (nonexistent)
+++ hadoop_conf/tags/localhost-2.0/2.0.4a-1/etc/hadoop/httpfs-site.xml (revision 539)
@@ -0,0 +1,17 @@
1+<?xml version="1.0" encoding="UTF-8"?>
2+<!--
3+ Licensed under the Apache License, Version 2.0 (the "License");
4+ you may not use this file except in compliance with the License.
5+ You may obtain a copy of the License at
6+
7+ http://www.apache.org/licenses/LICENSE-2.0
8+
9+ Unless required by applicable law or agreed to in writing, software
10+ distributed under the License is distributed on an "AS IS" BASIS,
11+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+ See the License for the specific language governing permissions and
13+ limitations under the License.
14+-->
15+<configuration>
16+
17+</configuration>
--- hadoop_conf/tags/localhost-2.0/2.0.4a-1/etc/hadoop/configuration.xsl (nonexistent)
+++ hadoop_conf/tags/localhost-2.0/2.0.4a-1/etc/hadoop/configuration.xsl (revision 539)
@@ -0,0 +1,40 @@
1+<?xml version="1.0"?>
2+<!--
3+ Licensed to the Apache Software Foundation (ASF) under one or more
4+ contributor license agreements. See the NOTICE file distributed with
5+ this work for additional information regarding copyright ownership.
6+ The ASF licenses this file to You under the Apache License, Version 2.0
7+ (the "License"); you may not use this file except in compliance with
8+ the License. You may obtain a copy of the License at
9+
10+ http://www.apache.org/licenses/LICENSE-2.0
11+
12+ Unless required by applicable law or agreed to in writing, software
13+ distributed under the License is distributed on an "AS IS" BASIS,
14+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15+ See the License for the specific language governing permissions and
16+ limitations under the License.
17+-->
18+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
19+<xsl:output method="html"/>
20+<xsl:template match="configuration">
21+<html>
22+<body>
23+<table border="1">
24+<tr>
25+ <td>name</td>
26+ <td>value</td>
27+ <td>description</td>
28+</tr>
29+<xsl:for-each select="property">
30+<tr>
31+ <td><a name="{name}"><xsl:value-of select="name"/></a></td>
32+ <td><xsl:value-of select="value"/></td>
33+ <td><xsl:value-of select="description"/></td>
34+</tr>
35+</xsl:for-each>
36+</table>
37+</body>
38+</html>
39+</xsl:template>
40+</xsl:stylesheet>
--- hadoop_conf/tags/localhost-2.0/2.0.4a-1/default.tsv (nonexistent)
+++ hadoop_conf/tags/localhost-2.0/2.0.4a-1/default.tsv (revision 539)
@@ -0,0 +1,327 @@
1+name value
2+dfs.ha.fencing.ssh.connect-timeout 30000
3+file.blocksize 67108864
4+file.bytes-per-checksum 512
5+file.client-write-packet-size 65536
6+file.replication 1
7+file.stream-buffer-size 4096
8+fs.AbstractFileSystem.file.impl org.apache.hadoop.fs.local.LocalFs
9+fs.AbstractFileSystem.hdfs.impl org.apache.hadoop.fs.Hdfs
10+fs.AbstractFileSystem.viewfs.impl org.apache.hadoop.fs.viewfs.ViewFs
11+fs.automatic.close true
12+fs.defaultFS hdfs://localhost:9000
13+fs.df.interval 60000
14+fs.ftp.host 0.0.0.0
15+fs.ftp.host.port 21
16+fs.permissions.umask-mode 22
17+fs.s3.block.size 67108864
18+fs.s3.buffer.dir ${hadoop.tmp.dir}/s3
19+fs.s3.maxRetries 4
20+fs.s3.sleepTimeSeconds 10
21+fs.s3n.block.size 67108864
22+fs.trash.checkpoint.interval 0
23+fs.trash.interval 0
24+ftp.blocksize 67108864
25+ftp.bytes-per-checksum 512
26+ftp.client-write-packet-size 65536
27+ftp.replication 3
28+ftp.stream-buffer-size 4096
29+ha.failover-controller.cli-check.rpc-timeout.ms 20000
30+ha.failover-controller.graceful-fence.connection.retries 1
31+ha.failover-controller.graceful-fence.rpc-timeout.ms 5000
32+ha.failover-controller.new-active.rpc-timeout.ms 60000
33+ha.health-monitor.check-interval.ms 1000
34+ha.health-monitor.connect-retry-interval.ms 1000
35+ha.health-monitor.rpc-timeout.ms 45000
36+ha.health-monitor.sleep-after-disconnect.ms 1000
37+ha.zookeeper.acl world:anyone:rwcda
38+ha.zookeeper.parent-znode /hadoop-ha
39+ha.zookeeper.session-timeout.ms 5000
40+hadoop.common.configuration.version 0.23.0
41+hadoop.http.authentication.kerberos.keytab ${user.home}/hadoop.keytab
42+hadoop.http.authentication.kerberos.principal HTTP/_HOST@LOCALHOST
43+hadoop.http.authentication.signature.secret.file ${user.home}/hadoop-http-auth-signature-secret
44+hadoop.http.authentication.simple.anonymous.allowed true
45+hadoop.http.authentication.token.validity 36000
46+hadoop.http.authentication.type simple
47+hadoop.http.filter.initializers org.apache.hadoop.http.lib.StaticUserWebFilter
48+hadoop.http.staticuser.user dr.who
49+hadoop.jetty.logs.serve.aliases true
50+hadoop.kerberos.kinit.command kinit
51+hadoop.rpc.protection authentication
52+hadoop.rpc.socket.factory.class.default org.apache.hadoop.net.StandardSocketFactory
53+hadoop.security.authentication simple
54+hadoop.security.authorization false
55+hadoop.security.group.mapping org.apache.hadoop.security.JniBasedUnixGroupsMappingWithFallback
56+hadoop.security.group.mapping.ldap.search.attr.group.name cn
57+hadoop.security.group.mapping.ldap.search.attr.member member
58+hadoop.security.group.mapping.ldap.search.filter.group (objectClass=group)
59+hadoop.security.group.mapping.ldap.search.filter.user (&(objectClass=user)(sAMAccountName={0}))
60+hadoop.security.group.mapping.ldap.ssl false
61+hadoop.security.groups.cache.secs 300
62+hadoop.security.instrumentation.requires.admin false
63+hadoop.security.uid.cache.secs 14400
64+hadoop.ssl.client.conf ssl-client.xml
65+hadoop.ssl.enabled false
66+hadoop.ssl.hostname.verifier DEFAULT
67+hadoop.ssl.keystores.factory.class org.apache.hadoop.security.ssl.FileBasedKeyStoresFactory
68+hadoop.ssl.require.client.cert false
69+hadoop.ssl.server.conf ssl-server.xml
70+hadoop.tmp.dir /tmp/hadoop-${user.name}
71+hadoop.util.hash.type murmur
72+hadoop.work.around.non.threadsafe.getpwuid false
73+io.bytes.per.checksum 512
74+io.compression.codec.bzip2.library system-native
75+io.file.buffer.size 4096
76+io.map.index.interval 128
77+io.map.index.skip 0
78+io.mapfile.bloom.error.rate 0.005
79+io.mapfile.bloom.size 1048576
80+io.native.lib.available true
81+io.seqfile.compress.blocksize 1000000
82+io.seqfile.lazydecompress true
83+io.seqfile.local.dir ${hadoop.tmp.dir}/io/local
84+io.seqfile.sorter.recordlimit 1000000
85+io.serializations org.apache.hadoop.io.serializer.WritableSerialization,org.apache.hadoop.io.serializer.avro.AvroSpecificSerialization,org.apache.hadoop.io.serializer.avro.AvroReflectSerialization
86+io.skip.checksum.errors false
87+ipc.client.connect.max.retries 10
88+ipc.client.connect.max.retries.on.timeouts 45
89+ipc.client.connect.timeout 20000
90+ipc.client.connection.maxidletime 10000
91+ipc.client.idlethreshold 4000
92+ipc.client.kill.max 10
93+ipc.client.tcpnodelay false
94+ipc.server.listen.queue.size 128
95+ipc.server.tcpnodelay false
96+kfs.blocksize 67108864
97+kfs.bytes-per-checksum 512
98+kfs.client-write-packet-size 65536
99+kfs.replication 3
100+kfs.stream-buffer-size 4096
101+map.sort.class org.apache.hadoop.util.QuickSort
102+mapred.child.java.opts -Xmx200m
103+mapreduce.admin.user.env LD_LIBRARY_PATH=$HADOOP_COMMON_HOME/lib/native
104+mapreduce.application.classpath $HADOOP_MAPRED_HOME/share/hadoop/mapreduce/*,$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/lib/*
105+mapreduce.client.completion.pollinterval 5000
106+mapreduce.client.output.filter FAILED
107+mapreduce.client.progressmonitor.pollinterval 1000
108+mapreduce.client.submit.file.replication 10
109+mapreduce.cluster.acls.enabled false
110+mapreduce.cluster.local.dir ${hadoop.tmp.dir}/mapred/local
111+mapreduce.cluster.temp.dir ${hadoop.tmp.dir}/mapred/temp
112+mapreduce.framework.name yarn
113+mapreduce.ifile.readahead true
114+mapreduce.ifile.readahead.bytes 4194304
115+mapreduce.input.fileinputformat.split.minsize 0
116+mapreduce.job.acl-modify-job
117+mapreduce.job.acl-view-job
118+mapreduce.job.classloader false
119+mapreduce.job.classloader.system.classes java.,javax.,org.apache.commons.logging.,org.apache.log4j.,org.apache.hadoop.
120+mapreduce.job.committer.setup.cleanup.needed true
121+mapreduce.job.complete.cancel.delegation.tokens true
122+mapreduce.job.counters.max 120
123+mapreduce.job.end-notification.max.attempts 5
124+mapreduce.job.end-notification.max.retry.interval 5000
125+mapreduce.job.end-notification.retry.attempts 0
126+mapreduce.job.end-notification.retry.interval 1000
127+mapreduce.job.hdfs-servers ${fs.defaultFS}
128+mapreduce.job.jvm.numtasks 1
129+mapreduce.job.map.output.collector.class org.apache.hadoop.mapred.MapTask$MapOutputBuffer
130+mapreduce.job.maps 2
131+mapreduce.job.maxtaskfailures.per.tracker 3
132+mapreduce.job.queuename default
133+mapreduce.job.reduce.shuffle.consumer.plugin.class org.apache.hadoop.mapreduce.task.reduce.Shuffle
134+mapreduce.job.reduce.slowstart.completedmaps 0.05
135+mapreduce.job.reduces 1
136+mapreduce.job.speculative.slownodethreshold 1
137+mapreduce.job.speculative.slowtaskthreshold 1
138+mapreduce.job.speculative.speculativecap 0.1
139+mapreduce.job.split.metainfo.maxsize 10000000
140+mapreduce.job.ubertask.enable false
141+mapreduce.job.ubertask.maxmaps 9
142+mapreduce.job.ubertask.maxreduces 1
143+mapreduce.job.userlog.retain.hours 24
144+mapreduce.jobhistory.address 0.0.0.0:10020
145+mapreduce.jobhistory.keytab /etc/security/keytab/jhs.service.keytab
146+mapreduce.jobhistory.principal jhs/_HOST@REALM.TLD
147+mapreduce.jobhistory.webapp.address 0.0.0.0:19888
148+mapreduce.jobtracker.address local
149+mapreduce.jobtracker.expire.trackers.interval 600000
150+mapreduce.jobtracker.handler.count 10
151+mapreduce.jobtracker.heartbeats.in.second 100
152+mapreduce.jobtracker.http.address 0.0.0.0:50030
153+mapreduce.jobtracker.instrumentation org.apache.hadoop.mapred.JobTrackerMetricsInst
154+mapreduce.jobtracker.jobhistory.block.size 3145728
155+mapreduce.jobtracker.jobhistory.lru.cache.size 5
156+mapreduce.jobtracker.jobhistory.task.numberprogresssplits 12
157+mapreduce.jobtracker.maxtasks.perjob -1
158+mapreduce.jobtracker.persist.jobstatus.active true
159+mapreduce.jobtracker.persist.jobstatus.dir /jobtracker/jobsInfo
160+mapreduce.jobtracker.persist.jobstatus.hours 1
161+mapreduce.jobtracker.restart.recover false
162+mapreduce.jobtracker.retiredjobs.cache.size 1000
163+mapreduce.jobtracker.staging.root.dir ${hadoop.tmp.dir}/mapred/staging
164+mapreduce.jobtracker.system.dir ${hadoop.tmp.dir}/mapred/system
165+mapreduce.jobtracker.taskcache.levels 2
166+mapreduce.jobtracker.taskscheduler org.apache.hadoop.mapred.JobQueueTaskScheduler
167+mapreduce.jobtracker.tasktracker.maxblacklists 4
168+mapreduce.local.clientfactory.class.name org.apache.hadoop.mapred.LocalClientFactory
169+mapreduce.map.cpu.vcores 1
170+mapreduce.map.log.level INFO
171+mapreduce.map.maxattempts 4
172+mapreduce.map.output.compress false
173+mapreduce.map.output.compress.codec org.apache.hadoop.io.compress.DefaultCodec
174+mapreduce.map.skip.maxrecords 0
175+mapreduce.map.skip.proc.count.autoincr true
176+mapreduce.map.sort.spill.percent 0.8
177+mapreduce.map.speculative true
178+mapreduce.output.fileoutputformat.compress false
179+mapreduce.output.fileoutputformat.compress.codec org.apache.hadoop.io.compress.DefaultCodec
180+mapreduce.output.fileoutputformat.compress.type RECORD
181+mapreduce.reduce.cpu.vcores 1
182+mapreduce.reduce.input.buffer.percent 0
183+mapreduce.reduce.log.level INFO
184+mapreduce.reduce.markreset.buffer.percent 0
185+mapreduce.reduce.maxattempts 4
186+mapreduce.reduce.merge.inmem.threshold 1000
187+mapreduce.reduce.shuffle.connect.timeout 180000
188+mapreduce.reduce.shuffle.input.buffer.percent 0.7
189+mapreduce.reduce.shuffle.memory.limit.percent 0.25
190+mapreduce.reduce.shuffle.merge.percent 0.66
191+mapreduce.reduce.shuffle.parallelcopies 5
192+mapreduce.reduce.shuffle.read.timeout 180000
193+mapreduce.reduce.shuffle.retry-delay.max.ms 60000
194+mapreduce.reduce.skip.maxgroups 0
195+mapreduce.reduce.skip.proc.count.autoincr true
196+mapreduce.reduce.speculative true
197+mapreduce.shuffle.max.connections 0
198+mapreduce.shuffle.port 8080
199+mapreduce.shuffle.ssl.enabled false
200+mapreduce.shuffle.ssl.file.buffer.size 65536
201+mapreduce.task.files.preserve.failedtasks false
202+mapreduce.task.io.sort.factor 10
203+mapreduce.task.io.sort.mb 100
204+mapreduce.task.merge.progress.records 10000
205+mapreduce.task.profile false
206+mapreduce.task.profile.maps 0-2
207+mapreduce.task.profile.reduces 0-2
208+mapreduce.task.skip.start.attempts 2
209+mapreduce.task.timeout 600000
210+mapreduce.task.tmp.dir ./tmp
211+mapreduce.task.userlog.limit.kb 0
212+mapreduce.tasktracker.dns.interface default
213+mapreduce.tasktracker.dns.nameserver default
214+mapreduce.tasktracker.healthchecker.interval 60000
215+mapreduce.tasktracker.healthchecker.script.timeout 600000
216+mapreduce.tasktracker.http.address 0.0.0.0:50060
217+mapreduce.tasktracker.http.threads 40
218+mapreduce.tasktracker.indexcache.mb 10
219+mapreduce.tasktracker.instrumentation org.apache.hadoop.mapred.TaskTrackerMetricsInst
220+mapreduce.tasktracker.local.dir.minspacekill 0
221+mapreduce.tasktracker.local.dir.minspacestart 0
222+mapreduce.tasktracker.map.tasks.maximum 2
223+mapreduce.tasktracker.outofband.heartbeat false
224+mapreduce.tasktracker.reduce.tasks.maximum 2
225+mapreduce.tasktracker.report.address 127.0.0.1:0
226+mapreduce.tasktracker.taskcontroller org.apache.hadoop.mapred.DefaultTaskController
227+mapreduce.tasktracker.taskmemorymanager.monitoringinterval 5000
228+mapreduce.tasktracker.tasks.sleeptimebeforesigkill 5000
229+net.topology.node.switch.mapping.impl org.apache.hadoop.net.ScriptBasedMapping
230+net.topology.script.number.args 100
231+s3.blocksize 67108864
232+s3.bytes-per-checksum 512
233+s3.client-write-packet-size 65536
234+s3.replication 3
235+s3.stream-buffer-size 4096
236+s3native.blocksize 67108864
237+s3native.bytes-per-checksum 512
238+s3native.client-write-packet-size 65536
239+s3native.replication 3
240+s3native.stream-buffer-size 4096
241+tfile.fs.input.buffer.size 262144
242+tfile.fs.output.buffer.size 262144
243+tfile.io.chunk.size 1048576
244+yarn.acl.enable true
245+yarn.admin.acl *
246+yarn.am.liveness-monitor.expiry-interval-ms 600000
247+yarn.app.mapreduce.am.command-opts -Xmx1024m
248+yarn.app.mapreduce.am.job.committer.cancel-timeout 60000
249+yarn.app.mapreduce.am.job.committer.commit-window 10000
250+yarn.app.mapreduce.am.job.task.listener.thread-count 30
251+yarn.app.mapreduce.am.resource.cpu-vcores 1
252+yarn.app.mapreduce.am.resource.mb 1536
253+yarn.app.mapreduce.am.scheduler.heartbeat.interval-ms 1000
254+yarn.app.mapreduce.am.staging-dir /tmp/hadoop-yarn/staging
255+yarn.app.mapreduce.client-am.ipc.max-retries 1
256+yarn.app.mapreduce.client.max-retries 3
257+yarn.application.classpath $HADOOP_CONF_DIR,$HADOOP_COMMON_HOME/share/hadoop/common/*,$HADOOP_COMMON_HOME/share/hadoop/common/lib/*,$HADOOP_HDFS_HOME/share/hadoop/hdfs/*,$HADOOP_HDFS_HOME/share/hadoop/hdfs/lib/*,$HADOOP_YARN_HOME/share/hadoop/yarn/*,$HADOOP_YARN_HOME/share/hadoop/yarn/lib/*
258+yarn.ipc.rpc.class org.apache.hadoop.yarn.ipc.HadoopYarnProtoRPC
259+yarn.ipc.serializer.type protocolbuffers
260+yarn.log-aggregation-enable false
261+yarn.log-aggregation.retain-check-interval-seconds -1
262+yarn.log-aggregation.retain-seconds -1
263+yarn.nm.liveness-monitor.expiry-interval-ms 600000
264+yarn.nodemanager.address 0.0.0.0:0
265+yarn.nodemanager.admin-env MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX
266+yarn.nodemanager.aux-services.mapreduce.shuffle.class org.apache.hadoop.mapred.ShuffleHandler
267+yarn.nodemanager.container-executor.class org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor
268+yarn.nodemanager.container-manager.thread-count 20
269+yarn.nodemanager.container-monitor.interval-ms 3000
270+yarn.nodemanager.delete.debug-delay-sec 0
271+yarn.nodemanager.delete.thread-count 4
272+yarn.nodemanager.disk-health-checker.interval-ms 120000
273+yarn.nodemanager.disk-health-checker.min-healthy-disks 0.25
274+yarn.nodemanager.env-whitelist JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,HADOOP_YARN_HOME
275+yarn.nodemanager.health-checker.interval-ms 600000
276+yarn.nodemanager.health-checker.script.timeout-ms 1200000
277+yarn.nodemanager.heartbeat.interval-ms 1000
278+yarn.nodemanager.keytab /etc/krb5.keytab
279+yarn.nodemanager.linux-container-executor.cgroups.hierarchy /hadoop-yarn
280+yarn.nodemanager.linux-container-executor.cgroups.mount false
281+yarn.nodemanager.linux-container-executor.resources-handler.class org.apache.hadoop.yarn.server.nodemanager.util.DefaultLCEResourcesHandler
282+yarn.nodemanager.local-dirs ${hadoop.tmp.dir}/nm-local-dir
283+yarn.nodemanager.localizer.address 0.0.0.0:8040
284+yarn.nodemanager.localizer.cache.cleanup.interval-ms 600000
285+yarn.nodemanager.localizer.cache.target-size-mb 10240
286+yarn.nodemanager.localizer.client.thread-count 5
287+yarn.nodemanager.localizer.fetch.thread-count 4
288+yarn.nodemanager.log-aggregation.compression-type none
289+yarn.nodemanager.log-dirs ${yarn.log.dir}/userlogs
290+yarn.nodemanager.log.retain-seconds 10800
291+yarn.nodemanager.pmem-check-enabled true
292+yarn.nodemanager.process-kill-wait.ms 2000
293+yarn.nodemanager.remote-app-log-dir /tmp/logs
294+yarn.nodemanager.remote-app-log-dir-suffix logs
295+yarn.nodemanager.resource.cpu-cores 8
296+yarn.nodemanager.resource.memory-mb 8192
297+yarn.nodemanager.sleep-delay-before-sigkill.ms 250
298+yarn.nodemanager.vcores-pcores-ratio 2
299+yarn.nodemanager.vmem-check-enabled true
300+yarn.nodemanager.vmem-pmem-ratio 2.1
301+yarn.nodemanager.webapp.address 0.0.0.0:8042
302+yarn.resourcemanager.address 0.0.0.0:8032
303+yarn.resourcemanager.admin.address 0.0.0.0:8033
304+yarn.resourcemanager.admin.client.thread-count 1
305+yarn.resourcemanager.am.max-retries 1
306+yarn.resourcemanager.amliveliness-monitor.interval-ms 1000
307+yarn.resourcemanager.application-tokens.master-key-rolling-interval-secs 86400
308+yarn.resourcemanager.client.thread-count 50
309+yarn.resourcemanager.container-tokens.master-key-rolling-interval-secs 86400
310+yarn.resourcemanager.container.liveness-monitor.interval-ms 600000
311+yarn.resourcemanager.delayed.delegation-token.removal-interval-ms 30000
312+yarn.resourcemanager.fs.rm-state-store.uri ${hadoop.tmp.dir}/yarn/system/rmstore
313+yarn.resourcemanager.keytab /etc/krb5.keytab
314+yarn.resourcemanager.max-completed-applications 10000
315+yarn.resourcemanager.nm.liveness-monitor.interval-ms 1000
316+yarn.resourcemanager.recovery.enabled false
317+yarn.resourcemanager.resource-tracker.address 0.0.0.0:8031
318+yarn.resourcemanager.resource-tracker.client.thread-count 50
319+yarn.resourcemanager.scheduler.address 0.0.0.0:8030
320+yarn.resourcemanager.scheduler.class org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler
321+yarn.resourcemanager.scheduler.client.thread-count 50
322+yarn.resourcemanager.store.class org.apache.hadoop.yarn.server.resourcemanager.recovery.FileSystemRMStateStore
323+yarn.resourcemanager.webapp.address 0.0.0.0:8088
324+yarn.scheduler.maximum-allocation-mb 8192
325+yarn.scheduler.maximum-allocation-vcores 32
326+yarn.scheduler.minimum-allocation-mb 1024
327+yarn.scheduler.minimum-allocation-vcores 1
--- hadoop_conf/tags/localhost-2.0/2.0.4a-1/default.xml (nonexistent)
+++ hadoop_conf/tags/localhost-2.0/2.0.4a-1/default.xml (revision 539)
@@ -0,0 +1,330 @@
1+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
2+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
3+<configuration>
4+<property><name>mapreduce.job.ubertask.enable</name><value>false</value><source>mapred-default.xml</source></property>
5+<property><name>yarn.resourcemanager.max-completed-applications</name><value>10000</value><source>yarn-default.xml</source></property>
6+<property><name>yarn.resourcemanager.delayed.delegation-token.removal-interval-ms</name><value>30000</value><source>yarn-default.xml</source></property>
7+<property><name>io.bytes.per.checksum</name><value>512</value><source>core-default.xml</source></property>
8+<property><name>mapreduce.client.submit.file.replication</name><value>10</value><source>mapred-default.xml</source></property>
9+<property><name>yarn.nodemanager.container-manager.thread-count</name><value>20</value><source>yarn-default.xml</source></property>
10+<property><name>yarn.nodemanager.pmem-check-enabled</name><value>true</value><source>yarn-default.xml</source></property>
11+<property><name>mapreduce.tasktracker.healthchecker.interval</name><value>60000</value><source>mapred-default.xml</source></property>
12+<property><name>mapreduce.jobtracker.staging.root.dir</name><value>${hadoop.tmp.dir}/mapred/staging</value><source>mapred-default.xml</source></property>
13+<property><name>yarn.resourcemanager.recovery.enabled</name><value>false</value><source>yarn-default.xml</source></property>
14+<property><name>yarn.resourcemanager.am.max-retries</name><value>1</value><source>yarn-default.xml</source></property>
15+<property><name>fs.AbstractFileSystem.file.impl</name><value>org.apache.hadoop.fs.local.LocalFs</value><source>core-default.xml</source></property>
16+<property><name>mapreduce.client.completion.pollinterval</name><value>5000</value><source>mapred-default.xml</source></property>
17+<property><name>mapreduce.job.ubertask.maxreduces</name><value>1</value><source>mapred-default.xml</source></property>
18+<property><name>mapreduce.reduce.shuffle.memory.limit.percent</name><value>0.25</value><source>mapred-default.xml</source></property>
19+<property><name>hadoop.ssl.keystores.factory.class</name><value>org.apache.hadoop.security.ssl.FileBasedKeyStoresFactory</value><source>core-default.xml</source></property>
20+<property><name>hadoop.http.authentication.kerberos.keytab</name><value>${user.home}/hadoop.keytab</value><source>core-default.xml</source></property>
21+<property><name>yarn.nodemanager.keytab</name><value>/etc/krb5.keytab</value><source>yarn-default.xml</source></property>
22+<property><name>io.seqfile.sorter.recordlimit</name><value>1000000</value><source>core-default.xml</source></property>
23+<property><name>s3.blocksize</name><value>67108864</value><source>core-default.xml</source></property>
24+<property><name>mapreduce.task.io.sort.factor</name><value>10</value><source>mapred-default.xml</source></property>
25+<property><name>yarn.nodemanager.disk-health-checker.interval-ms</name><value>120000</value><source>yarn-default.xml</source></property>
26+<property><name>yarn.admin.acl</name><value>*</value><source>yarn-default.xml</source></property>
27+<property><name>mapreduce.job.speculative.speculativecap</name><value>0.1</value><source>mapred-default.xml</source></property>
28+<property><name>yarn.nodemanager.resource.memory-mb</name><value>8192</value><source>yarn-default.xml</source></property>
29+<property><name>io.map.index.interval</name><value>128</value><source>core-default.xml</source></property>
30+<property><name>s3.client-write-packet-size</name><value>65536</value><source>core-default.xml</source></property>
31+<property><name>mapreduce.task.files.preserve.failedtasks</name><value>false</value><source>mapred-default.xml</source></property>
32+<property><name>ha.zookeeper.session-timeout.ms</name><value>5000</value><source>core-default.xml</source></property>
33+<property><name>s3.replication</name><value>3</value><source>core-default.xml</source></property>
34+<property><name>mapreduce.reduce.shuffle.connect.timeout</name><value>180000</value><source>mapred-default.xml</source></property>
35+<property><name>hadoop.ssl.enabled</name><value>false</value><source>core-default.xml</source></property>
36+<property><name>mapreduce.job.counters.max</name><value>120</value><source>mapred-default.xml</source></property>
37+<property><name>ipc.client.connect.max.retries.on.timeouts</name><value>45</value><source>core-default.xml</source></property>
38+<property><name>mapreduce.job.complete.cancel.delegation.tokens</name><value>true</value><source>mapred-default.xml</source></property>
39+<property><name>fs.trash.interval</name><value>0</value><source>core-default.xml</source></property>
40+<property><name>yarn.resourcemanager.admin.address</name><value>0.0.0.0:8033</value><source>yarn-default.xml</source></property>
41+<property><name>ha.health-monitor.check-interval.ms</name><value>1000</value><source>core-default.xml</source></property>
42+<property><name>hadoop.jetty.logs.serve.aliases</name><value>true</value><source>core-default.xml</source></property>
43+<property><name>hadoop.http.authentication.kerberos.principal</name><value>HTTP/_HOST@LOCALHOST</value><source>core-default.xml</source></property>
44+<property><name>mapreduce.tasktracker.taskmemorymanager.monitoringinterval</name><value>5000</value><source>mapred-default.xml</source></property>
45+<property><name>mapreduce.job.reduce.shuffle.consumer.plugin.class</name><value>org.apache.hadoop.mapreduce.task.reduce.Shuffle</value><source>mapred-default.xml</source></property>
46+<property><name>s3native.blocksize</name><value>67108864</value><source>core-default.xml</source></property>
47+<property><name>ha.health-monitor.sleep-after-disconnect.ms</name><value>1000</value><source>core-default.xml</source></property>
48+<property><name>yarn.log-aggregation.retain-check-interval-seconds</name><value>-1</value><source>yarn-default.xml</source></property>
49+<property><name>mapreduce.jobtracker.jobhistory.task.numberprogresssplits</name><value>12</value><source>mapred-default.xml</source></property>
50+<property><name>mapreduce.map.cpu.vcores</name><value>1</value><source>mapred-default.xml</source></property>
51+<property><name>yarn.acl.enable</name><value>true</value><source>yarn-default.xml</source></property>
52+<property><name>hadoop.security.instrumentation.requires.admin</name><value>false</value><source>core-default.xml</source></property>
53+<property><name>yarn.nodemanager.localizer.fetch.thread-count</name><value>4</value><source>yarn-default.xml</source></property>
54+<property><name>hadoop.security.authorization</name><value>false</value><source>core-default.xml</source></property>
55+<property><name>hadoop.security.group.mapping.ldap.search.filter.group</name><value>(objectClass=group)</value><source>core-default.xml</source></property>
56+<property><name>mapreduce.output.fileoutputformat.compress.codec</name><value>org.apache.hadoop.io.compress.DefaultCodec</value><source>mapred-default.xml</source></property>
57+<property><name>mapreduce.shuffle.max.connections</name><value>0</value><source>mapred-default.xml</source></property>
58+<property><name>mapreduce.shuffle.port</name><value>8080</value><source>mapred-default.xml</source></property>
59+<property><name>mapreduce.reduce.log.level</name><value>INFO</value><source>mapred-default.xml</source></property>
60+<property><name>yarn.log-aggregation-enable</name><value>false</value><source>yarn-default.xml</source></property>
61+<property><name>mapreduce.jobtracker.instrumentation</name><value>org.apache.hadoop.mapred.JobTrackerMetricsInst</value><source>mapred-default.xml</source></property>
62+<property><name>hadoop.security.group.mapping.ldap.search.attr.group.name</name><value>cn</value><source>core-default.xml</source></property>
63+<property><name>s3native.bytes-per-checksum</name><value>512</value><source>core-default.xml</source></property>
64+<property><name>mapreduce.tasktracker.tasks.sleeptimebeforesigkill</name><value>5000</value><source>mapred-default.xml</source></property>
65+<property><name>tfile.fs.output.buffer.size</name><value>262144</value><source>core-default.xml</source></property>
66+<property><name>yarn.nodemanager.local-dirs</name><value>${hadoop.tmp.dir}/nm-local-dir</value><source>yarn-default.xml</source></property>
67+<property><name>mapreduce.jobtracker.persist.jobstatus.active</name><value>true</value><source>mapred-default.xml</source></property>
68+<property><name>fs.AbstractFileSystem.hdfs.impl</name><value>org.apache.hadoop.fs.Hdfs</value><source>core-default.xml</source></property>
69+<property><name>mapreduce.job.map.output.collector.class</name><value>org.apache.hadoop.mapred.MapTask$MapOutputBuffer</value><source>mapred-default.xml</source></property>
70+<property><name>mapreduce.tasktracker.local.dir.minspacestart</name><value>0</value><source>mapred-default.xml</source></property>
71+<property><name>hadoop.security.uid.cache.secs</name><value>14400</value><source>core-default.xml</source></property>
72+<property><name>hadoop.ssl.client.conf</name><value>ssl-client.xml</value><source>core-default.xml</source></property>
73+<property><name>mapreduce.tasktracker.local.dir.minspacekill</name><value>0</value><source>mapred-default.xml</source></property>
74+<property><name>mapreduce.jobtracker.retiredjobs.cache.size</name><value>1000</value><source>mapred-default.xml</source></property>
75+<property><name>yarn.resourcemanager.scheduler.class</name><value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler</value><source>yarn-default.xml</source></property>
76+<property><name>mapreduce.job.reduce.slowstart.completedmaps</name><value>0.05</value><source>mapred-default.xml</source></property>
77+<property><name>mapreduce.job.end-notification.retry.attempts</name><value>0</value><source>mapred-default.xml</source></property>
78+<property><name>mapreduce.tasktracker.outofband.heartbeat</name><value>false</value><source>mapred-default.xml</source></property>
79+<property><name>io.native.lib.available</name><value>true</value><source>core-default.xml</source></property>
80+<property><name>mapreduce.jobtracker.persist.jobstatus.hours</name><value>1</value><source>mapred-default.xml</source></property>
81+<property><name>mapreduce.client.progressmonitor.pollinterval</name><value>1000</value><source>mapred-default.xml</source></property>
82+<property><name>mapreduce.reduce.input.buffer.percent</name><value>0.0</value><source>mapred-default.xml</source></property>
83+<property><name>mapreduce.map.output.compress.codec</name><value>org.apache.hadoop.io.compress.DefaultCodec</value><source>mapred-default.xml</source></property>
84+<property><name>mapreduce.map.skip.proc.count.autoincr</name><value>true</value><source>mapred-default.xml</source></property>
85+<property><name>mapreduce.jobtracker.address</name><value>local</value><source>mapred-default.xml</source></property>
86+<property><name>mapreduce.cluster.local.dir</name><value>${hadoop.tmp.dir}/mapred/local</value><source>mapred-default.xml</source></property>
87+<property><name>mapreduce.tasktracker.taskcontroller</name><value>org.apache.hadoop.mapred.DefaultTaskController</value><source>mapred-default.xml</source></property>
88+<property><name>mapreduce.reduce.shuffle.parallelcopies</name><value>5</value><source>mapred-default.xml</source></property>
89+<property><name>yarn.nodemanager.env-whitelist</name><value>JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,HADOOP_YARN_HOME</value><source>yarn-default.xml</source></property>
90+<property><name>mapreduce.jobtracker.heartbeats.in.second</name><value>100</value><source>mapred-default.xml</source></property>
91+<property><name>mapreduce.job.maxtaskfailures.per.tracker</name><value>3</value><source>mapred-default.xml</source></property>
92+<property><name>ipc.client.connection.maxidletime</name><value>10000</value><source>core-default.xml</source></property>
93+<property><name>mapreduce.shuffle.ssl.enabled</name><value>false</value><source>mapred-default.xml</source></property>
94+<property><name>fs.s3.sleepTimeSeconds</name><value>10</value><source>core-default.xml</source></property>
95+<property><name>yarn.scheduler.maximum-allocation-vcores</name><value>32</value><source>yarn-default.xml</source></property>
96+<property><name>hadoop.ssl.server.conf</name><value>ssl-server.xml</value><source>core-default.xml</source></property>
97+<property><name>ha.zookeeper.parent-znode</name><value>/hadoop-ha</value><source>core-default.xml</source></property>
98+<property><name>io.seqfile.lazydecompress</name><value>true</value><source>core-default.xml</source></property>
99+<property><name>mapreduce.reduce.merge.inmem.threshold</name><value>1000</value><source>mapred-default.xml</source></property>
100+<property><name>mapreduce.input.fileinputformat.split.minsize</name><value>0</value><source>mapred-default.xml</source></property>
101+<property><name>ipc.client.tcpnodelay</name><value>false</value><source>core-default.xml</source></property>
102+<property><name>s3.stream-buffer-size</name><value>4096</value><source>core-default.xml</source></property>
103+<property><name>mapreduce.jobtracker.tasktracker.maxblacklists</name><value>4</value><source>mapred-default.xml</source></property>
104+<property><name>mapreduce.job.jvm.numtasks</name><value>1</value><source>mapred-default.xml</source></property>
105+<property><name>mapreduce.task.io.sort.mb</name><value>100</value><source>mapred-default.xml</source></property>
106+<property><name>io.file.buffer.size</name><value>4096</value><source>core-default.xml</source></property>
107+<property><name>yarn.nodemanager.admin-env</name><value>MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX</value><source>yarn-default.xml</source></property>
108+<property><name>mapreduce.job.split.metainfo.maxsize</name><value>10000000</value><source>mapred-default.xml</source></property>
109+<property><name>kfs.replication</name><value>3</value><source>core-default.xml</source></property>
110+<property><name>yarn.app.mapreduce.am.scheduler.heartbeat.interval-ms</name><value>1000</value><source>mapred-default.xml</source></property>
111+<property><name>mapreduce.reduce.maxattempts</name><value>4</value><source>mapred-default.xml</source></property>
112+<property><name>kfs.stream-buffer-size</name><value>4096</value><source>core-default.xml</source></property>
113+<property><name>hadoop.security.authentication</name><value>simple</value><source>core-default.xml</source></property>
114+<property><name>fs.s3.buffer.dir</name><value>${hadoop.tmp.dir}/s3</value><source>core-default.xml</source></property>
115+<property><name>mapreduce.jobtracker.taskscheduler</name><value>org.apache.hadoop.mapred.JobQueueTaskScheduler</value><source>mapred-default.xml</source></property>
116+<property><name>yarn.app.mapreduce.am.job.task.listener.thread-count</name><value>30</value><source>mapred-default.xml</source></property>
117+<property><name>mapreduce.job.reduces</name><value>1</value><source>mapred-default.xml</source></property>
118+<property><name>mapreduce.map.sort.spill.percent</name><value>0.80</value><source>mapred-default.xml</source></property>
119+<property><name>mapreduce.job.end-notification.retry.interval</name><value>1000</value><source>mapred-default.xml</source></property>
120+<property><name>mapreduce.job.maps</name><value>2</value><source>mapred-default.xml</source></property>
121+<property><name>mapreduce.job.speculative.slownodethreshold</name><value>1.0</value><source>mapred-default.xml</source></property>
122+<property><name>tfile.fs.input.buffer.size</name><value>262144</value><source>core-default.xml</source></property>
123+<property><name>mapreduce.map.speculative</name><value>true</value><source>mapred-default.xml</source></property>
124+<property><name>mapreduce.job.acl-view-job</name><value> </value><source>mapred-default.xml</source></property>
125+<property><name>mapreduce.reduce.shuffle.retry-delay.max.ms</name><value>60000</value><source>mapred-default.xml</source></property>
126+<property><name>mapreduce.job.end-notification.max.retry.interval</name><value>5000</value><source>mapred-default.xml</source></property>
127+<property><name>yarn.ipc.serializer.type</name><value>protocolbuffers</value><source>yarn-default.xml</source></property>
128+<property><name>ftp.blocksize</name><value>67108864</value><source>core-default.xml</source></property>
129+<property><name>mapreduce.tasktracker.http.threads</name><value>40</value><source>mapred-default.xml</source></property>
130+<property><name>ha.failover-controller.cli-check.rpc-timeout.ms</name><value>20000</value><source>core-default.xml</source></property>
131+<property><name>mapreduce.task.skip.start.attempts</name><value>2</value><source>mapred-default.xml</source></property>
132+<property><name>mapreduce.jobtracker.persist.jobstatus.dir</name><value>/jobtracker/jobsInfo</value><source>mapred-default.xml</source></property>
133+<property><name>ipc.client.kill.max</name><value>10</value><source>core-default.xml</source></property>
134+<property><name>yarn.nodemanager.linux-container-executor.cgroups.mount</name><value>false</value><source>yarn-default.xml</source></property>
135+<property><name>mapreduce.jobhistory.keytab</name><value>/etc/security/keytab/jhs.service.keytab</value><source>mapred-default.xml</source></property>
136+<property><name>yarn.nodemanager.linux-container-executor.cgroups.hierarchy</name><value>/hadoop-yarn</value><source>yarn-default.xml</source></property>
137+<property><name>mapreduce.job.end-notification.max.attempts</name><value>5</value><source>mapred-default.xml</source></property>
138+<property><name>mapreduce.task.tmp.dir</name><value>./tmp</value><source>mapred-default.xml</source></property>
139+<property><name>kfs.bytes-per-checksum</name><value>512</value><source>core-default.xml</source></property>
140+<property><name>hadoop.http.filter.initializers</name><value>org.apache.hadoop.http.lib.StaticUserWebFilter</value><source>core-default.xml</source></property>
141+<property><name>hadoop.http.authentication.type</name><value>simple</value><source>core-default.xml</source></property>
142+<property><name>yarn.resourcemanager.client.thread-count</name><value>50</value><source>yarn-default.xml</source></property>
143+<property><name>ipc.server.listen.queue.size</name><value>128</value><source>core-default.xml</source></property>
144+<property><name>mapreduce.reduce.skip.maxgroups</name><value>0</value><source>mapred-default.xml</source></property>
145+<property><name>file.stream-buffer-size</name><value>4096</value><source>core-default.xml</source></property>
146+<property><name>yarn.resourcemanager.store.class</name><value>org.apache.hadoop.yarn.server.resourcemanager.recovery.FileSystemRMStateStore</value><source>yarn-default.xml</source></property>
147+<property><name>io.mapfile.bloom.size</name><value>1048576</value><source>core-default.xml</source></property>
148+<property><name>yarn.nodemanager.container-executor.class</name><value>org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor</value><source>yarn-default.xml</source></property>
149+<property><name>mapreduce.map.maxattempts</name><value>4</value><source>mapred-default.xml</source></property>
150+<property><name>mapreduce.jobtracker.jobhistory.block.size</name><value>3145728</value><source>mapred-default.xml</source></property>
151+<property><name>yarn.log-aggregation.retain-seconds</name><value>-1</value><source>yarn-default.xml</source></property>
152+<property><name>yarn.app.mapreduce.am.job.committer.cancel-timeout</name><value>60000</value><source>mapred-default.xml</source></property>
153+<property><name>ftp.replication</name><value>3</value><source>core-default.xml</source></property>
154+<property><name>mapreduce.jobtracker.http.address</name><value>0.0.0.0:50030</value><source>mapred-default.xml</source></property>
155+<property><name>yarn.nodemanager.health-checker.script.timeout-ms</name><value>1200000</value><source>yarn-default.xml</source></property>
156+<property><name>mapreduce.jobhistory.address</name><value>0.0.0.0:10020</value><source>mapred-default.xml</source></property>
157+<property><name>mapreduce.jobtracker.taskcache.levels</name><value>2</value><source>mapred-default.xml</source></property>
158+<property><name>mapreduce.application.classpath</name><value>$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/*,$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/lib/*</value><source>mapred-default.xml</source></property>
159+<property><name>yarn.nodemanager.log.retain-seconds</name><value>10800</value><source>yarn-default.xml</source></property>
160+<property><name>mapred.child.java.opts</name><value>-Xmx200m</value><source>mapred-default.xml</source></property>
161+<property><name>map.sort.class</name><value>org.apache.hadoop.util.QuickSort</value><source>mapred-default.xml</source></property>
162+<property><name>hadoop.util.hash.type</name><value>murmur</value><source>core-default.xml</source></property>
163+<property><name>mapreduce.reduce.skip.proc.count.autoincr</name><value>true</value><source>mapred-default.xml</source></property>
164+<property><name>yarn.nodemanager.container-monitor.interval-ms</name><value>3000</value><source>yarn-default.xml</source></property>
165+<property><name>yarn.nodemanager.disk-health-checker.min-healthy-disks</name><value>0.25</value><source>yarn-default.xml</source></property>
166+<property><name>kfs.client-write-packet-size</name><value>65536</value><source>core-default.xml</source></property>
167+<property><name>ha.zookeeper.acl</name><value>world:anyone:rwcda</value><source>core-default.xml</source></property>
168+<property><name>yarn.nodemanager.sleep-delay-before-sigkill.ms</name><value>250</value><source>yarn-default.xml</source></property>
169+<property><name>io.map.index.skip</name><value>0</value><source>core-default.xml</source></property>
170+<property><name>net.topology.node.switch.mapping.impl</name><value>org.apache.hadoop.net.ScriptBasedMapping</value><source>core-default.xml</source></property>
171+<property><name>fs.s3.maxRetries</name><value>4</value><source>core-default.xml</source></property>
172+<property><name>ha.failover-controller.new-active.rpc-timeout.ms</name><value>60000</value><source>core-default.xml</source></property>
173+<property><name>s3native.client-write-packet-size</name><value>65536</value><source>core-default.xml</source></property>
174+<property><name>yarn.resourcemanager.amliveliness-monitor.interval-ms</name><value>1000</value><source>yarn-default.xml</source></property>
175+<property><name>hadoop.http.staticuser.user</name><value>dr.who</value><source>core-default.xml</source></property>
176+<property><name>mapreduce.reduce.speculative</name><value>true</value><source>mapred-default.xml</source></property>
177+<property><name>mapreduce.client.output.filter</name><value>FAILED</value><source>mapred-default.xml</source></property>
178+<property><name>mapreduce.ifile.readahead.bytes</name><value>4194304</value><source>mapred-default.xml</source></property>
179+<property><name>mapreduce.tasktracker.report.address</name><value>127.0.0.1:0</value><source>mapred-default.xml</source></property>
180+<property><name>mapreduce.task.userlog.limit.kb</name><value>0</value><source>mapred-default.xml</source></property>
181+<property><name>mapreduce.tasktracker.map.tasks.maximum</name><value>2</value><source>mapred-default.xml</source></property>
182+<property><name>hadoop.http.authentication.simple.anonymous.allowed</name><value>true</value><source>core-default.xml</source></property>
183+<property><name>mapreduce.job.classloader.system.classes</name><value>java.,javax.,org.apache.commons.logging.,org.apache.log4j.,org.apache.hadoop.</value><source>mapred-default.xml</source></property>
184+<property><name>hadoop.rpc.socket.factory.class.default</name><value>org.apache.hadoop.net.StandardSocketFactory</value><source>core-default.xml</source></property>
185+<property><name>fs.automatic.close</name><value>true</value><source>core-default.xml</source></property>
186+<property><name>mapreduce.tasktracker.healthchecker.script.timeout</name><value>600000</value><source>mapred-default.xml</source></property>
187+<property><name>yarn.resourcemanager.address</name><value>0.0.0.0:8032</value><source>yarn-default.xml</source></property>
188+<property><name>yarn.nodemanager.health-checker.interval-ms</name><value>600000</value><source>yarn-default.xml</source></property>
189+<property><name>yarn.resourcemanager.container-tokens.master-key-rolling-interval-secs</name><value>86400</value><source>yarn-default.xml</source></property>
190+<property><name>mapreduce.reduce.markreset.buffer.percent</name><value>0.0</value><source>mapred-default.xml</source></property>
191+<property><name>mapreduce.map.log.level</name><value>INFO</value><source>mapred-default.xml</source></property>
192+<property><name>yarn.nodemanager.localizer.address</name><value>0.0.0.0:8040</value><source>yarn-default.xml</source></property>
193+<property><name>ftp.stream-buffer-size</name><value>4096</value><source>core-default.xml</source></property>
194+<property><name>yarn.resourcemanager.keytab</name><value>/etc/krb5.keytab</value><source>yarn-default.xml</source></property>
195+<property><name>ha.health-monitor.rpc-timeout.ms</name><value>45000</value><source>core-default.xml</source></property>
196+<property><name>hadoop.security.group.mapping.ldap.search.attr.member</name><value>member</value><source>core-default.xml</source></property>
197+<property><name>mapreduce.job.classloader</name><value>false</value><source>mapred-default.xml</source></property>
198+<property><name>yarn.nm.liveness-monitor.expiry-interval-ms</name><value>600000</value><source>yarn-default.xml</source></property>
199+<property><name>io.compression.codec.bzip2.library</name><value>system-native</value><source>core-default.xml</source></property>
200+<property><name>hadoop.http.authentication.token.validity</name><value>36000</value><source>core-default.xml</source></property>
201+<property><name>yarn.nodemanager.resource.cpu-cores</name><value>8</value><source>yarn-default.xml</source></property>
202+<property><name>yarn.nodemanager.vcores-pcores-ratio</name><value>2</value><source>yarn-default.xml</source></property>
203+<property><name>mapreduce.job.hdfs-servers</name><value>${fs.defaultFS}</value><source>yarn-default.xml</source></property>
204+<property><name>s3native.replication</name><value>3</value><source>core-default.xml</source></property>
205+<property><name>yarn.nodemanager.localizer.client.thread-count</name><value>5</value><source>yarn-default.xml</source></property>
206+<property><name>yarn.resourcemanager.container.liveness-monitor.interval-ms</name><value>600000</value><source>yarn-default.xml</source></property>
207+<property><name>dfs.ha.fencing.ssh.connect-timeout</name><value>30000</value><source>core-default.xml</source></property>
208+<property><name>yarn.am.liveness-monitor.expiry-interval-ms</name><value>600000</value><source>yarn-default.xml</source></property>
209+<property><name>mapreduce.task.profile</name><value>false</value><source>mapred-default.xml</source></property>
210+<property><name>yarn.nodemanager.linux-container-executor.resources-handler.class</name><value>org.apache.hadoop.yarn.server.nodemanager.util.DefaultLCEResourcesHandler</value><source>yarn-default.xml</source></property>
211+<property><name>mapreduce.tasktracker.instrumentation</name><value>org.apache.hadoop.mapred.TaskTrackerMetricsInst</value><source>mapred-default.xml</source></property>
212+<property><name>mapreduce.tasktracker.http.address</name><value>0.0.0.0:50060</value><source>mapred-default.xml</source></property>
213+<property><name>mapreduce.jobhistory.webapp.address</name><value>0.0.0.0:19888</value><source>mapred-default.xml</source></property>
214+<property><name>yarn.ipc.rpc.class</name><value>org.apache.hadoop.yarn.ipc.HadoopYarnProtoRPC</value><source>yarn-default.xml</source></property>
215+<property><name>ha.failover-controller.graceful-fence.rpc-timeout.ms</name><value>5000</value><source>core-default.xml</source></property>
216+<property><name>yarn.resourcemanager.application-tokens.master-key-rolling-interval-secs</name><value>86400</value><source>yarn-default.xml</source></property>
217+<property><name>kfs.blocksize</name><value>67108864</value><source>core-default.xml</source></property>
218+<property><name>mapreduce.job.ubertask.maxmaps</name><value>9</value><source>mapred-default.xml</source></property>
219+<property><name>yarn.scheduler.maximum-allocation-mb</name><value>8192</value><source>yarn-default.xml</source></property>
220+<property><name>yarn.nodemanager.heartbeat.interval-ms</name><value>1000</value><source>yarn-default.xml</source></property>
221+<property><name>mapreduce.job.userlog.retain.hours</name><value>24</value><source>mapred-default.xml</source></property>
222+<property><name>mapreduce.task.timeout</name><value>600000</value><source>mapred-default.xml</source></property>
223+<property><name>mapreduce.framework.name</name><value>yarn</value><source>mapred-site.xml</source></property>
224+<property><name>ipc.client.idlethreshold</name><value>4000</value><source>core-default.xml</source></property>
225+<property><name>ipc.server.tcpnodelay</name><value>false</value><source>core-default.xml</source></property>
226+<property><name>ftp.bytes-per-checksum</name><value>512</value><source>core-default.xml</source></property>
227+<property><name>s3.bytes-per-checksum</name><value>512</value><source>core-default.xml</source></property>
228+<property><name>mapreduce.job.speculative.slowtaskthreshold</name><value>1.0</value><source>mapred-default.xml</source></property>
229+<property><name>yarn.nodemanager.localizer.cache.target-size-mb</name><value>10240</value><source>yarn-default.xml</source></property>
230+<property><name>yarn.nodemanager.remote-app-log-dir</name><value>/tmp/logs</value><source>yarn-default.xml</source></property>
231+<property><name>fs.s3.block.size</name><value>67108864</value><source>core-default.xml</source></property>
232+<property><name>mapreduce.job.queuename</name><value>default</value><source>mapred-default.xml</source></property>
233+<property><name>yarn.scheduler.minimum-allocation-mb</name><value>1024</value><source>yarn-default.xml</source></property>
234+<property><name>hadoop.rpc.protection</name><value>authentication</value><source>core-default.xml</source></property>
235+<property><name>yarn.app.mapreduce.client-am.ipc.max-retries</name><value>1</value><source>mapred-default.xml</source></property>
236+<property><name>ftp.client-write-packet-size</name><value>65536</value><source>core-default.xml</source></property>
237+<property><name>yarn.nodemanager.address</name><value>0.0.0.0:0</value><source>yarn-default.xml</source></property>
238+<property><name>fs.defaultFS</name><value>hdfs://localhost:9000</value><source>core-site.xml</source></property>
239+<property><name>mapreduce.task.merge.progress.records</name><value>10000</value><source>mapred-default.xml</source></property>
240+<property><name>yarn.resourcemanager.scheduler.client.thread-count</name><value>50</value><source>yarn-default.xml</source></property>
241+<property><name>file.client-write-packet-size</name><value>65536</value><source>core-default.xml</source></property>
242+<property><name>mapreduce.reduce.cpu.vcores</name><value>1</value><source>mapred-default.xml</source></property>
243+<property><name>yarn.nodemanager.delete.thread-count</name><value>4</value><source>yarn-default.xml</source></property>
244+<property><name>yarn.resourcemanager.scheduler.address</name><value>0.0.0.0:8030</value><source>yarn-default.xml</source></property>
245+<property><name>fs.trash.checkpoint.interval</name><value>0</value><source>core-default.xml</source></property>
246+<property><name>hadoop.http.authentication.signature.secret.file</name><value>${user.home}/hadoop-http-auth-signature-secret</value><source>core-default.xml</source></property>
247+<property><name>s3native.stream-buffer-size</name><value>4096</value><source>core-default.xml</source></property>
248+<property><name>mapreduce.reduce.shuffle.read.timeout</name><value>180000</value><source>mapred-default.xml</source></property>
249+<property><name>yarn.app.mapreduce.am.command-opts</name><value>-Xmx1024m</value><source>mapred-default.xml</source></property>
250+<property><name>mapreduce.admin.user.env</name><value>LD_LIBRARY_PATH=$HADOOP_COMMON_HOME/lib/native</value><source>mapred-default.xml</source></property>
251+<property><name>yarn.resourcemanager.fs.rm-state-store.uri</name><value>${hadoop.tmp.dir}/yarn/system/rmstore</value><source>yarn-default.xml</source></property>
252+<property><name>mapreduce.local.clientfactory.class.name</name><value>org.apache.hadoop.mapred.LocalClientFactory</value><source>mapred-default.xml</source></property>
253+<property><name>fs.permissions.umask-mode</name><value>022</value><source>core-default.xml</source></property>
254+<property><name>hadoop.common.configuration.version</name><value>0.23.0</value><source>core-default.xml</source></property>
255+<property><name>mapreduce.tasktracker.dns.interface</name><value>default</value><source>mapred-default.xml</source></property>
256+<property><name>mapreduce.output.fileoutputformat.compress.type</name><value>RECORD</value><source>mapred-default.xml</source></property>
257+<property><name>hadoop.security.group.mapping.ldap.ssl</name><value>false</value><source>core-default.xml</source></property>
258+<property><name>mapreduce.ifile.readahead</name><value>true</value><source>mapred-default.xml</source></property>
259+<property><name>io.serializations</name><value>org.apache.hadoop.io.serializer.WritableSerialization,org.apache.hadoop.io.serializer.avro.AvroSpecificSerialization,org.apache.hadoop.io.serializer.avro.AvroReflectSerialization</value><source>core-default.xml</source></property>
260+<property><name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name><value>org.apache.hadoop.mapred.ShuffleHandler</value><source>yarn-default.xml</source></property>
261+<property><name>fs.df.interval</name><value>60000</value><source>core-default.xml</source></property>
262+<property><name>mapreduce.reduce.shuffle.input.buffer.percent</name><value>0.70</value><source>mapred-default.xml</source></property>
263+<property><name>io.seqfile.compress.blocksize</name><value>1000000</value><source>core-default.xml</source></property>
264+<property><name>ipc.client.connect.max.retries</name><value>10</value><source>core-default.xml</source></property>
265+<property><name>hadoop.security.groups.cache.secs</name><value>300</value><source>core-default.xml</source></property>
266+<property><name>yarn.nodemanager.process-kill-wait.ms</name><value>2000</value><source>yarn-default.xml</source></property>
267+<property><name>yarn.nodemanager.vmem-check-enabled</name><value>true</value><source>yarn-default.xml</source></property>
268+<property><name>yarn.application.classpath</name><value>$HADOOP_CONF_DIR,$HADOOP_COMMON_HOME/share/hadoop/common/*,$HADOOP_COMMON_HOME/share/hadoop/common/lib/*,$HADOOP_HDFS_HOME/share/hadoop/hdfs/*,$HADOOP_HDFS_HOME/share/hadoop/hdfs/lib/*,$HADOOP_YARN_HOME/share/hadoop/yarn/*,$HADOOP_YARN_HOME/share/hadoop/yarn/lib/*</value><source>yarn-default.xml</source></property>
269+<property><name>yarn.app.mapreduce.client.max-retries</name><value>3</value><source>mapred-default.xml</source></property>
270+<property><name>yarn.nodemanager.log-aggregation.compression-type</name><value>none</value><source>yarn-default.xml</source></property>
271+<property><name>hadoop.security.group.mapping.ldap.search.filter.user</name><value>(&amp;(objectClass=user)(sAMAccountName={0}))</value><source>core-default.xml</source></property>
272+<property><name>yarn.nodemanager.localizer.cache.cleanup.interval-ms</name><value>600000</value><source>yarn-default.xml</source></property>
273+<property><name>yarn.nodemanager.log-dirs</name><value>${yarn.log.dir}/userlogs</value><source>yarn-default.xml</source></property>
274+<property><name>fs.s3n.block.size</name><value>67108864</value><source>core-default.xml</source></property>
275+<property><name>fs.ftp.host</name><value>0.0.0.0</value><source>core-default.xml</source></property>
276+<property><name>hadoop.security.group.mapping</name><value>org.apache.hadoop.security.JniBasedUnixGroupsMappingWithFallback</value><source>core-default.xml</source></property>
277+<property><name>yarn.app.mapreduce.am.resource.cpu-vcores</name><value>1</value><source>mapred-default.xml</source></property>
278+<property><name>mapreduce.map.skip.maxrecords</name><value>0</value><source>mapred-default.xml</source></property>
279+<property><name>yarn.scheduler.minimum-allocation-vcores</name><value>1</value><source>yarn-default.xml</source></property>
280+<property><name>file.replication</name><value>1</value><source>core-default.xml</source></property>
281+<property><name>yarn.resourcemanager.resource-tracker.address</name><value>0.0.0.0:8031</value><source>yarn-default.xml</source></property>
282+<property><name>mapreduce.jobtracker.restart.recover</name><value>false</value><source>mapred-default.xml</source></property>
283+<property><name>hadoop.work.around.non.threadsafe.getpwuid</name><value>false</value><source>core-default.xml</source></property>
284+<property><name>mapreduce.tasktracker.indexcache.mb</name><value>10</value><source>mapred-default.xml</source></property>
285+<property><name>mapreduce.output.fileoutputformat.compress</name><value>false</value><source>mapred-default.xml</source></property>
286+<property><name>hadoop.tmp.dir</name><value>/tmp/hadoop-${user.name}</value><source>core-default.xml</source></property>
287+<property><name>hadoop.kerberos.kinit.command</name><value>kinit</value><source>core-default.xml</source></property>
288+<property><name>mapreduce.job.committer.setup.cleanup.needed</name><value>true</value><source>mapred-default.xml</source></property>
289+<property><name>mapreduce.task.profile.reduces</name><value>0-2</value><source>mapred-default.xml</source></property>
290+<property><name>file.bytes-per-checksum</name><value>512</value><source>core-default.xml</source></property>
291+<property><name>mapreduce.jobtracker.handler.count</name><value>10</value><source>mapred-default.xml</source></property>
292+<property><name>yarn.app.mapreduce.am.job.committer.commit-window</name><value>10000</value><source>mapred-default.xml</source></property>
293+<property><name>net.topology.script.number.args</name><value>100</value><source>core-default.xml</source></property>
294+<property><name>mapreduce.task.profile.maps</name><value>0-2</value><source>mapred-default.xml</source></property>
295+<property><name>yarn.resourcemanager.webapp.address</name><value>0.0.0.0:8088</value><source>yarn-default.xml</source></property>
296+<property><name>mapreduce.jobtracker.system.dir</name><value>${hadoop.tmp.dir}/mapred/system</value><source>mapred-default.xml</source></property>
297+<property><name>hadoop.ssl.hostname.verifier</name><value>DEFAULT</value><source>core-default.xml</source></property>
298+<property><name>yarn.nodemanager.vmem-pmem-ratio</name><value>2.1</value><source>yarn-default.xml</source></property>
299+<property><name>ipc.client.connect.timeout</name><value>20000</value><source>core-default.xml</source></property>
300+<property><name>mapreduce.jobhistory.principal</name><value>jhs/_HOST@REALM.TLD</value><source>mapred-default.xml</source></property>
301+<property><name>io.mapfile.bloom.error.rate</name><value>0.005</value><source>core-default.xml</source></property>
302+<property><name>mapreduce.shuffle.ssl.file.buffer.size</name><value>65536</value><source>mapred-default.xml</source></property>
303+<property><name>mapreduce.jobtracker.expire.trackers.interval</name><value>600000</value><source>mapred-default.xml</source></property>
304+<property><name>mapreduce.cluster.acls.enabled</name><value>false</value><source>mapred-default.xml</source></property>
305+<property><name>yarn.nodemanager.remote-app-log-dir-suffix</name><value>logs</value><source>yarn-default.xml</source></property>
306+<property><name>ha.failover-controller.graceful-fence.connection.retries</name><value>1</value><source>core-default.xml</source></property>
307+<property><name>ha.health-monitor.connect-retry-interval.ms</name><value>1000</value><source>core-default.xml</source></property>
308+<property><name>yarn.app.mapreduce.am.resource.mb</name><value>1536</value><source>mapred-default.xml</source></property>
309+<property><name>io.seqfile.local.dir</name><value>${hadoop.tmp.dir}/io/local</value><source>core-default.xml</source></property>
310+<property><name>mapreduce.reduce.shuffle.merge.percent</name><value>0.66</value><source>mapred-default.xml</source></property>
311+<property><name>tfile.io.chunk.size</name><value>1048576</value><source>core-default.xml</source></property>
312+<property><name>file.blocksize</name><value>67108864</value><source>core-default.xml</source></property>
313+<property><name>mapreduce.jobtracker.jobhistory.lru.cache.size</name><value>5</value><source>mapred-default.xml</source></property>
314+<property><name>mapreduce.jobtracker.maxtasks.perjob</name><value>-1</value><source>mapred-default.xml</source></property>
315+<property><name>yarn.resourcemanager.nm.liveness-monitor.interval-ms</name><value>1000</value><source>yarn-default.xml</source></property>
316+<property><name>mapreduce.job.acl-modify-job</name><value> </value><source>mapred-default.xml</source></property>
317+<property><name>yarn.nodemanager.webapp.address</name><value>0.0.0.0:8042</value><source>yarn-default.xml</source></property>
318+<property><name>mapreduce.tasktracker.reduce.tasks.maximum</name><value>2</value><source>mapred-default.xml</source></property>
319+<property><name>mapreduce.cluster.temp.dir</name><value>${hadoop.tmp.dir}/mapred/temp</value><source>mapred-default.xml</source></property>
320+<property><name>io.skip.checksum.errors</name><value>false</value><source>core-default.xml</source></property>
321+<property><name>yarn.app.mapreduce.am.staging-dir</name><value>/tmp/hadoop-yarn/staging</value><source>mapred-default.xml</source></property>
322+<property><name>fs.ftp.host.port</name><value>21</value><source>core-default.xml</source></property>
323+<property><name>yarn.resourcemanager.admin.client.thread-count</name><value>1</value><source>yarn-default.xml</source></property>
324+<property><name>fs.AbstractFileSystem.viewfs.impl</name><value>org.apache.hadoop.fs.viewfs.ViewFs</value><source>core-default.xml</source></property>
325+<property><name>yarn.resourcemanager.resource-tracker.client.thread-count</name><value>50</value><source>yarn-default.xml</source></property>
326+<property><name>mapreduce.tasktracker.dns.nameserver</name><value>default</value><source>mapred-default.xml</source></property>
327+<property><name>mapreduce.map.output.compress</name><value>false</value><source>mapred-default.xml</source></property>
328+<property><name>yarn.nodemanager.delete.debug-delay-sec</name><value>0</value><source>yarn-default.xml</source></property>
329+<property><name>hadoop.ssl.require.client.cert</name><value>false</value><source>core-default.xml</source></property>
330+</configuration>
--- hadoop_conf/tags/localhost-2.0/2.0.4a-1/configuration.xsl (nonexistent)
+++ hadoop_conf/tags/localhost-2.0/2.0.4a-1/configuration.xsl (revision 539)
@@ -0,0 +1,24 @@
1+<?xml version="1.0"?>
2+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
3+<xsl:output method="html"/>
4+<xsl:template match="configuration">
5+<html>
6+<body>
7+<table border="1">
8+<tr>
9+ <th>name</th>
10+ <th>value</th>
11+ <th>source</th>
12+</tr>
13+<xsl:for-each select="property">
14+<tr>
15+ <td><a name="{name}"><xsl:value-of select="name"/></a></td>
16+ <td><xsl:value-of select="value"/></td>
17+ <td><xsl:value-of select="source"/></td>
18+</tr>
19+</xsl:for-each>
20+</table>
21+</body>
22+</html>
23+</xsl:template>
24+</xsl:stylesheet>
Show on old repository browser