• R/O
  • SSH
  • HTTPS

metasearch: Commit


Commit MetaInfo

Revision: 569 (tree)
Time: 2013-10-25 20:20:48
Author: whitestar

Log Message

add SPNEGO configurations for YARN and JobHistoryServer web console.

Change Summary

Incremental Difference

--- hadoop_conf/tags/localhost-2.2/2.2.0s-1/etc/hadoop/mapred-site.xml (nonexistent)
+++ hadoop_conf/tags/localhost-2.2/2.2.0s-1/etc/hadoop/mapred-site.xml (revision 569)
@@ -0,0 +1,89 @@
1+<?xml version="1.0"?>
2+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
3+<!--
4+ Licensed under the Apache License, Version 2.0 (the "License");
5+ you may not use this file except in compliance with the License.
6+ You may obtain a copy of the License at
7+
8+ http://www.apache.org/licenses/LICENSE-2.0
9+
10+ Unless required by applicable law or agreed to in writing, software
11+ distributed under the License is distributed on an "AS IS" BASIS,
12+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13+ See the License for the specific language governing permissions and
14+ limitations under the License. See accompanying LICENSE file.
15+-->
16+
17+<!-- Put site-specific property overrides in this file. -->
18+
19+<configuration>
20+ <property>
21+ <name>this.jobhistory.fqdn</name>
22+ <value>localhost</value>
23+ <!-- <value>${this.cluster.name}-jt.${this.domain}</value> -->
24+ <!-- <value>${this.cluster.name}-jh.${this.domain}</value> -->
25+ </property>
26+
27+ <property>
28+ <name>mapreduce.framework.name</name>
29+ <value>yarn</value>
30+ <description>The runtime framework for executing MapReduce jobs.
31+ Can be one of local, classic or yarn.
32+ (default: local)
33+ </description>
34+ </property>
35+ <property>
36+ <name>yarn.app.mapreduce.am.staging-dir</name>
37+ <value>/user</value>
38+ </property>
39+ <property>
40+ <name>mapreduce.jobhistory.intermediate-done-dir</name>
41+ <value>/grid/vol/0/var/lib/mapred/history/done_intermediate</value>
42+ <!-- NG: <value>/user</value> -->
43+ </property>
44+ <property>
45+ <name>mapreduce.jobhistory.done-dir</name>
46+ <value>/grid/vol/0/var/lib/mapred/history/done</value>
47+ </property>
48+
49+ <!-- NOT necessary.
50+ <property>
51+ <name>mapreduce.cluster.local.dir</name>
52+ <value>/grid/vol/0/var/lib/mapred/local</value>
53+ <description>
54+ The local directory where MapReduce stores intermediate data files.
55+ May be a comma-separated list of directories on different devices in order to spread disk i/o.
56+ Directories that do not exist are ignored.
57+ </description>
58+ </property>
59+ -->
60+ <!-- NOT necessary.
61+ <property>
62+ <name>mapreduce.cluster.temp.dir</name>
63+ <value>/grid/vol/0/tmp/mapred</value>
64+ <description>
65+ A shared directory for temporary files.
66+ </description>
67+ </property>
68+ -->
69+
70+ <property>
71+ <name>mapreduce.jobhistory.principal</name>
72+ <value>mapred/${this.jobhistory.fqdn}@${this.realm}</value>
73+ <!-- <value>mapred/_HOST@${this.realm}</value> -->
74+ </property>
75+ <property>
76+ <name>mapreduce.jobhistory.keytab</name>
77+ <value>${this.keytab.dir}/jh.keytab</value>
78+ </property>
79+
80+ <property>
81+ <name>mapreduce.jobhistory.webapp.spnego-principal</name>
82+ <value>HTTP/${this.jobhistory.fqdn}@${this.realm}</value>
83+ <!-- <value>HTTP/_HOST@${this.realm}</value> -->
84+ </property>
85+ <property>
86+ <name>mapreduce.jobhistory.webapp.spnego-keytab-file</name>
87+ <value>${this.keytab.dir}/HTTP.keytab</value>
88+ </property>
89+</configuration>
--- hadoop_conf/tags/localhost-2.2/2.2.0s-1/etc/hadoop/yarn-site.xml (nonexistent)
+++ hadoop_conf/tags/localhost-2.2/2.2.0s-1/etc/hadoop/yarn-site.xml (revision 569)
@@ -0,0 +1,183 @@
1+<?xml version="1.0"?>
2+<!--
3+ Licensed under the Apache License, Version 2.0 (the "License");
4+ you may not use this file except in compliance with the License.
5+ You may obtain a copy of the License at
6+
7+ http://www.apache.org/licenses/LICENSE-2.0
8+
9+ Unless required by applicable law or agreed to in writing, software
10+ distributed under the License is distributed on an "AS IS" BASIS,
11+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+ See the License for the specific language governing permissions and
13+ limitations under the License. See accompanying LICENSE file.
14+-->
15+<configuration>
16+
17+<!-- Site specific YARN configuration properties -->
18+ <property>
19+ <name>this.resourcemanager.fqdn</name>
20+ <value>localhost</value>
21+ <!-- <value>${this.cluster.name}-rm.${this.domain}</value> -->
22+ </property>
23+
24+ <property>
25+ <name>yarn.log-aggregation-enable</name>
26+ <value>true</value>
27+ </property>
28+ <property>
29+ <name>yarn.resourcemanager.scheduler.class</name>
30+ <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler</value>
31+ <description>In case you do not want to use the default scheduler</description>
32+ </property>
33+ <property>
34+ <name>yarn.nodemanager.local-dirs</name>
35+ <value>/grid/vol/0/var/lib/${user.name}/nm/local</value>
36+ <!-- <value>/grid/vol/0/var/lib/${user.name}/nm/local,/grid/vol/1/var/lib/${user.name}/nm/local</value> -->
37+ <description>the local directories used by the nodemanager
38+ (default: /tmp/nm-local-dir)</description>
39+ </property>
40+ <property>
41+ <name>yarn.nodemanager.resource.memory-mb</name>
42+ <value>8192</value>
43+ <description>Amount of physical memory, in MB, that can be allocated
44+ for containers. (default: 8192)</description>
45+ </property>
46+ <property>
47+ <name>yarn.nodemanager.resource.cpu-cores</name>
48+ <value>8</value>
49+ <description>Number of CPU cores that can be allocated
50+ for containers. (default: 8)</description>
51+ </property>
52+ <property>
53+ <name>yarn.nodemanager.remote-app-log-dir</name>
54+ <value>/grid/vol/0/var/log/${user.name}/nm</value>
55+ <description>directory on hdfs where the application logs are moved to
56+ (default: /tmp/logs)</description>
57+ </property>
58+ <property>
59+ <name>yarn.nodemanager.log-dirs</name>
60+ <value>/grid/vol/0/var/log/${user.name}/nm</value>
61+ <!-- <value>/grid/vol/0/var/log/${user.name}/nm,/grid/vol/1/var/log/${user.name}/nm</value> -->
62+ <description>the directories used by Nodemanagers as log directories
63+ (default: /tmp/logs)</description>
64+ </property>
65+ <property>
66+ <name>yarn.nodemanager.aux-services</name>
67+ <value>mapreduce_shuffle</value>
68+ <description>shuffle service that needs to be set for Map Reduce to run</description>
69+ </property>
70+ <property>
71+ <name>yarn.application.classpath</name>
72+ <value>
73+ $HADOOP_CONF_DIR,
74+ $HADOOP_COMMON_HOME/share/hadoop/common/*,
75+ $HADOOP_COMMON_HOME/share/hadoop/common/lib/*,
76+ $HADOOP_HDFS_HOME/share/hadoop/hdfs/*,
77+ $HADOOP_HDFS_HOME/share/hadoop/hdfs/lib/*,
78+ $HADOOP_YARN_HOME/share/hadoop/yarn/*,
79+ $HADOOP_YARN_HOME/share/hadoop/yarn/lib/*</value>
80+ <description>Classpath for typical applications.</description>
81+ </property>
82+
83+ <property>
84+ <name>yarn.resourcemanager.nodes.include-path</name>
85+ <value>/grid/usr/hadoop/etc/hadoop/hosts.include</value>
86+ <description>Path to file with nodes to include.</description>
87+ </property>
88+ <property>
89+ <name>yarn.resourcemanager.nodes.exclude-path</name>
90+ <value>/grid/usr/hadoop/etc/hadoop/hosts.exclude</value>
91+ <description>Path to file with nodes to exclude.</description>
92+ </property>
93+
94+ <property>
95+ <name>yarn.nodemanager.admin-env</name>
96+ <value>MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX,LD_LIBRARY_PATH=${HADOOP_COMMON_HOME}/lib/native</value>
97+ </property>
98+
99+ <property>
100+ <name>yarn.acl.enable</name>
101+ <value>true</value>
102+ </property>
103+ <property>
104+ <name>yarn.admin.acl</name>
105+ <value> yarn,gridops</value>
106+ </property>
107+ <property>
108+ <name>yarn.resourcemanager.principal</name>
109+ <value>yarn/${this.resourcemanager.fqdn}@${this.realm}</value>
110+ <!-- <value>yarn/_HOST@${this.realm}</value> -->
111+ </property>
112+ <property>
113+ <name>yarn.resourcemanager.keytab</name>
114+ <value>${this.keytab.dir}/rm.keytab</value>
115+ </property>
116+ <property>
117+ <name>yarn.nodemanager.principal</name>
118+ <value>yarn/localhost@${this.realm}</value>
119+ <!-- <value>yarn/_HOST@${this.realm}</value> -->
120+ </property>
121+ <property>
122+ <name>yarn.nodemanager.keytab</name>
123+ <value>${this.keytab.dir}/nm.keytab</value>
124+ </property>
125+
126+ <property>
127+ <name>yarn.nodemanager.container-executor.class</name>
128+ <value>org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor</value>
129+ </property>
130+ <property>
131+ <name>yarn.nodemanager.linux-container-executor.group</name>
132+ <value>yarn</value>
133+ </property>
134+ <property>
135+ <name>yarn.nodemanager.linux-container-executor.resources-handler.class</name>
136+ <value>org.apache.hadoop.yarn.server.nodemanager.util.CgroupsLCEResourcesHandler</value>
137+ <description>The class which should help the LCE handle resources.</description>
138+ </property>
139+ <property>
140+ <name>yarn.nodemanager.linux-container-executor.cgroups.hierarchy</name>
141+ <value>/hadoop-yarn</value>
142+ <description>The cgroups hierarchy under which to place YARN processes (cannot contain commas).
143+ If yarn.nodemanager.linux-container-executor.cgroups.mount is false (that is, if cgroups have
144+ been pre-configured), then this cgroups hierarchy must already exist and be writable by the
145+ NodeManager user, otherwise the NodeManager may fail.
146+ Only used when the LCE resources handler is set to the CgroupsLCEResourcesHandler.</description>
147+ </property>
148+ <property>
149+ <name>yarn.nodemanager.linux-container-executor.cgroups.mount</name>
150+ <value>false</value>
151+ <description>Whether the LCE should attempt to mount cgroups if not found.
152+ Only used when the LCE resources handler is set to the CgroupsLCEResourcesHandler.</description>
153+ </property>
154+ <property>
155+ <name>yarn.nodemanager.linux-container-executor.cgroups.mount-path</name>
156+ <value></value>
157+ <description>Where the LCE should attempt to mount cgroups if not found. Common locations
158+ include /sys/fs/cgroup and /cgroup; the default location can vary depending on the Linux
159+ distribution in use. This path must exist before the NodeManager is launched.
160+ Only used when the LCE resources handler is set to the CgroupsLCEResourcesHandler, and
161+ yarn.nodemanager.linux-container-executor.cgroups.mount is true.</description>
162+ </property>
163+
164+ <property>
165+ <name>yarn.resourcemanager.webapp.spnego-principal</name>
166+ <value>HTTP/${this.resourcemanager.fqdn}@${this.realm}</value>
167+ <!-- <value>HTTP/_HOST@${this.realm}</value> -->
168+ </property>
169+ <property>
170+ <name>yarn.resourcemanager.webapp.spnego-keytab-file</name>
171+ <value>${this.keytab.dir}/HTTP.keytab</value>
172+ </property>
173+ <property>
174+ <name>yarn.nodemanager.webapp.spnego-principal</name>
175+ <value>HTTP/localhost@${this.realm}</value>
176+ <!-- <value>HTTP/_HOST@${this.realm}</value> -->
177+ </property>
178+ <property>
179+ <name>yarn.nodemanager.webapp.spnego-keytab-file</name>
180+ <value>${this.keytab.dir}/HTTP.keytab</value>
181+ </property>
182+</configuration>
183+
--- hadoop_conf/tags/localhost-2.2/2.2.0s-1/etc/hadoop/hdfs-site.xml (nonexistent)
+++ hadoop_conf/tags/localhost-2.2/2.2.0s-1/etc/hadoop/hdfs-site.xml (revision 569)
@@ -0,0 +1,171 @@
1+<?xml version="1.0" encoding="UTF-8"?>
2+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
3+<!--
4+ Licensed under the Apache License, Version 2.0 (the "License");
5+ you may not use this file except in compliance with the License.
6+ You may obtain a copy of the License at
7+
8+ http://www.apache.org/licenses/LICENSE-2.0
9+
10+ Unless required by applicable law or agreed to in writing, software
11+ distributed under the License is distributed on an "AS IS" BASIS,
12+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13+ See the License for the specific language governing permissions and
14+ limitations under the License. See accompanying LICENSE file.
15+-->
16+
17+<!-- Put site-specific property overrides in this file. -->
18+
19+<configuration>
20+ <property>
21+ <name>this.secondary.namenode.fqdn</name>
22+ <value>localhost</value>
23+ <!-- <value>${this.cluster.name}-cn.${this.domain}</value> -->
24+ </property>
25+
26+ <property>
27+ <name>dfs.namenode.name.dir</name>
28+ <value>file:///grid/vol/0/var/lib/${user.name}/name</value>
29+ <!-- <value>file:///grid/vol/0/var/lib/${user.name}/name,file:///export/home/${user.name}/var/lib/name</value> -->
30+ </property>
31+ <property>
32+ <name>dfs.datanode.data.dir</name>
33+ <value>file:///grid/vol/0/var/lib/${user.name}/data</value>
34+ <!-- <value>file:///grid/vol/0/var/lib/${user.name}/data,file:///grid/vol/1/var/lib/${user.name}/data</value> -->
35+ </property>
36+ <property>
37+ <name>dfs.namenode.checkpoint.dir</name>
38+ <value>file:///grid/vol/0/var/lib/${user.name}/checkpoint</value>
39+ <!-- <value>file:///grid/vol/0/var/lib/${user.name}/checkpoint,file:///export/home/${user.name}/var/lib/checkpoint</value> -->
40+ </property>
41+ <property>
42+ <name>dfs.replication</name>
43+ <value>1</value>
44+ <!-- <value>3</value> -->
45+ </property>
46+
47+ <property>
48+ <name>dfs.hosts</name>
49+ <value>/grid/usr/hadoop/etc/hadoop/hosts.include</value>
50+ <description>
51+ Names a file that contains a list of hosts that are permitted to connect to the namenode.
52+ The full pathname of the file must be specified. If the value is empty, all hosts are permitted.
53+ </description>
54+ </property>
55+ <property>
56+ <name>dfs.hosts.exclude</name>
57+ <value>/grid/usr/hadoop/etc/hadoop/hosts.exclude</value>
58+ <description>
59+ Names a file that contains a list of hosts that are not permitted to connect to the namenode.
60+ The full pathname of the file must be specified. If the value is empty, no hosts are excluded.
61+ </description>
62+ </property>
63+
64+ <property>
65+ <name>dfs.namenode.kerberos.principal</name>
66+ <value>hdfs/_HOST@${this.realm}</value>
67+ <!-- _HOST is replaced with the fs.defaultFS's host name -->
68+ <!-- <value>hdfs/${this.namenode.fqdn}@${this.realm}</value> -->
69+ <description>Kerberos principal name for the NameNode</description>
70+ </property>
71+ <property>
72+ <name>dfs.namenode.keytab.file</name>
73+ <value>${this.keytab.dir}/nn.keytab</value>
74+ <description>
75+ Combined keytab file containing the namenode service and host
76+ principals.
77+ </description>
78+ </property>
79+ <property>
80+ <name>dfs.secondary.namenode.kerberos.principal</name>
81+ <value>hdfs/${this.secondary.namenode.fqdn}@${this.realm}</value>
82+ <!-- <value>hdfs/_HOST@${this.realm}</value> -->
83+ <description>
84+ Kerberos principal name for the secondary NameNode.
85+ </description>
86+ </property>
87+ <property>
88+ <name>dfs.secondary.namenode.keytab.file</name>
89+ <value>${this.keytab.dir}/cn.keytab</value>
90+ <description>
91+ Combined keytab file containing the namenode service and host
92+ principals.
93+ </description>
94+ </property>
95+ <!-- for KSSL (NOT RECOMMENDED). Note: N/A on the CDH4 -->
96+ <property>
97+ <name>hadoop.security.use-weak-http-crypto</name>
98+ <value>false</value>
99+ </property>
100+ <property>
101+ <name>dfs.block.access.token.enable</name>
102+ <value>true</value>
103+ <description>
104+ If "true", access tokens are used as capabilities for accessing
105+ datanodes.
106+ If "false", no access tokens are checked on accessing datanodes.
107+ </description>
108+ </property>
109+ <property>
110+ <name>dfs.datanode.kerberos.principal</name>
111+ <value>hdfs/localhost@${this.realm}</value>
112+ <!-- <value>hdfs/_HOST@${this.realm}</value> -->
113+ <description>
114+ The Kerberos principal that the DataNode runs as. "_HOST" is
115+ replaced by the real host name.
116+ </description>
117+ </property>
118+ <property>
119+ <name>dfs.datanode.keytab.file</name>
120+ <value>${this.keytab.dir}/dn.keytab</value>
121+ <description>
122+ The filename of the keytab file for the DataNode.
123+ </description>
124+ </property>
125+ <property>
126+ <name>dfs.namenode.kerberos.internal.spnego.principal</name>
127+ <value>${dfs.web.authentication.kerberos.principal}</value>
128+ <!-- <value>HTTP/_HOST@${this.realm}</value> -->
129+ <!-- _HOST is replaced with dfs.namenode.http-address's host name. -->
130+ </property>
131+ <property>
132+ <name>dfs.secondary.namenode.kerberos.internal.spnego.principal</name>
133+ <value>HTTP/${this.secondary.namenode.fqdn}@${this.realm}</value>
134+ <!-- <value>HTTP/_HOST@${this.realm}</value> -->
135+ <!-- _HOST is replaced with dfs.namenode.secondary.http-address's host name. -->
136+ </property>
137+
138+ <property>
139+ <name>dfs.datanode.address</name>
140+ <value>0.0.0.0:1004</value>
141+ </property>
142+ <property>
143+ <name>dfs.datanode.http.address</name>
144+ <value>0.0.0.0:1006</value>
145+ </property>
146+
147+ <property>
148+ <name>dfs.namenode.http-address</name>
149+ <value>${this.namenode.fqdn}:50070</value>
150+ </property>
151+ <property>
152+ <name>dfs.namenode.secondary.http-address</name>
153+ <value>${this.secondary.namenode.fqdn}:50090</value>
154+ </property>
155+ <property>
156+ <name>dfs.web.authentication.kerberos.principal</name>
157+ <value>HTTP/_HOST@${this.realm}</value>
158+ </property>
159+ <property>
160+ <name>dfs.web.authentication.kerberos.keytab</name>
161+ <value>${this.keytab.dir}/HTTP.keytab</value>
162+ </property>
163+ <property>
164+ <name>dfs.support.append</name>
165+ <value>true</value>
166+ </property>
167+ <property>
168+ <name>dfs.datanode.max.xcievers</name>
169+ <value>4096</value>
170+ </property>
171+</configuration>
--- hadoop_conf/tags/localhost-2.2/2.2.0s-1/etc/hadoop/core-site.xml (nonexistent)
+++ hadoop_conf/tags/localhost-2.2/2.2.0s-1/etc/hadoop/core-site.xml (revision 569)
@@ -0,0 +1,142 @@
1+<?xml version="1.0" encoding="UTF-8"?>
2+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
3+<!--
4+ Licensed under the Apache License, Version 2.0 (the "License");
5+ you may not use this file except in compliance with the License.
6+ You may obtain a copy of the License at
7+
8+ http://www.apache.org/licenses/LICENSE-2.0
9+
10+ Unless required by applicable law or agreed to in writing, software
11+ distributed under the License is distributed on an "AS IS" BASIS,
12+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13+ See the License for the specific language governing permissions and
14+ limitations under the License. See accompanying LICENSE file.
15+-->
16+
17+<!-- Put site-specific property overrides in this file. -->
18+
19+<configuration>
20+ <property>
21+ <name>this.cluster.name</name>
22+ <value>localhost</value>
23+ <!-- <value>pleiades</value> -->
24+ </property>
25+ <property>
26+ <name>this.domain</name>
27+ <value>localhost</value>
28+ <!-- <value>grid.example.com</value> -->
29+ </property>
30+ <property>
31+ <name>this.realm</name>
32+ <value>LOCALDOMAIN</value>
33+ <!-- <value>GRID.EXAMPLE.COM</value> -->
34+ </property>
35+ <property>
36+ <name>this.keytab.dir</name>
37+ <value>/grid/etc/keytabs/localhost</value>
38+ </property>
39+ <property>
40+ <name>this.namenode.fqdn</name>
41+ <value>localhost</value>
42+ <!-- <value>${this.cluster.name}-nn.${this.domain}</value> -->
43+ </property>
44+
45+ <property>
46+ <name>fs.defaultFS</name>
47+ <value>hdfs://${this.namenode.fqdn}:9000</value>
48+ </property>
49+ <property>
50+ <name>hadoop.tmp.dir</name>
51+ <value>/tmp/hadoop-${user.name}</value>
52+ </property>
53+
54+ <property>
55+ <name>hadoop.security.authentication</name>
56+ <value>kerberos</value>
57+ <description>
58+ Set the authentication for the cluster. Valid values are: simple or
59+ kerberos.
60+ </description>
61+ </property>
62+ <property>
63+ <name>hadoop.security.authorization</name>
64+ <value>true</value>
65+ <description>
66+ Enable authorization for different protocols.
67+ </description>
68+ </property>
69+ <property>
70+ <name>hadoop.security.auth_to_local</name>
71+ <value>
72+ RULE:[2:$1@$0](.*@${this.realm})s/@.*//
73+ RULE:[1:$1@$0](.*@${this.realm})s/@.*//
74+ RULE:[2:$1@$0](hdfs@.*${this.realm})s/.*/hdfs/
75+ RULE:[2:$1@$0](yarn@.*${this.realm})s/.*/yarn/
76+ RULE:[2:$1@$0](mapred@.*${this.realm})s/.*/mapred/
77+ DEFAULT</value>
78+ </property>
79+ <property>
80+ <name>hadoop.security.group.mapping</name>
81+ <value>org.apache.hadoop.security.JniBasedUnixGroupsMapping</value>
82+ </property>
83+ <property>
84+ <name>hadoop.security.groups.cache.secs</name>
85+ <value>14400</value>
86+ </property>
87+ <property>
88+ <name>hadoop.kerberos.kinit.command</name>
89+ <value>/usr/bin/kinit</value>
90+ </property>
91+
92+ <property>
93+ <name>hadoop.http.filter.initializers</name>
94+ <value>org.apache.hadoop.security.AuthenticationFilterInitializer</value>
95+ <!-- <value>org.apache.hadoop.http.lib.StaticUserWebFilter</value> -->
96+ <description>The name of a class that initializes an input filter for Jetty.
97+ This filter will always return Dr.Who as the web user when the servlets
98+ query for the authenticated user </description>
99+ </property>
100+ <property>
101+ <name>hadoop.http.authentication.signature.secret.file</name>
102+ <value>/grid/etc/hadoop-http-auth-signature-secret</value>
103+ </property>
104+ <property>
105+ <name>hadoop.http.authentication.cookie.domain</name>
106+ <value>${this.domain}</value>
107+ </property>
108+ <property>
109+ <name>hadoop.http.authentication.type</name>
110+ <value>simple</value>
111+ <description>Defines authentication used for the HTTP web-consoles.
112+ The supported values are: simple | kerberos | #AUTHENTICATION_HANDLER_CLASSNAME#.
113+ The default value is simple.</description>
114+ </property>
115+ <property>
116+ <name>hadoop.http.authentication.kerberos.principal</name>
117+ <value>HTTP/localhost@${this.realm}</value>
118+ <!-- <value>HTTP/_HOST@${this.realm}</value>
119+ _HOST N/A!: v1.0, HDP1.2; OK: v2.0, CDH3, CDH4 -->
120+ </property>
121+ <property>
122+ <name>hadoop.http.authentication.kerberos.keytab</name>
123+ <value>${this.keytab.dir}/HTTP.keytab</value>
124+ </property>
125+
126+ <property>
127+ <name>hadoop.proxyuser.oozie.hosts</name>
128+ <value>localhost</value>
129+ </property>
130+ <property>
131+ <name>hadoop.proxyuser.oozie.groups</name>
132+ <value>hadoopers</value>
133+ </property>
134+ <property>
135+ <name>hadoop.proxyuser.httpfs.hosts</name>
136+ <value>localhost</value>
137+ </property>
138+ <property>
139+ <name>hadoop.proxyuser.httpfs.groups</name>
140+ <value>hadoopers</value>
141+ </property>
142+</configuration>
--- hadoop_conf/tags/localhost-2.2/2.2.0s-1/etc/hadoop/hadoop-env.sh (nonexistent)
+++ hadoop_conf/tags/localhost-2.2/2.2.0s-1/etc/hadoop/hadoop-env.sh (revision 569)
@@ -0,0 +1,112 @@
1+# Copyright 2011 The Apache Software Foundation
2+#
3+# Licensed to the Apache Software Foundation (ASF) under one
4+# or more contributor license agreements. See the NOTICE file
5+# distributed with this work for additional information
6+# regarding copyright ownership. The ASF licenses this file
7+# to you under the Apache License, Version 2.0 (the
8+# "License"); you may not use this file except in compliance
9+# with the License. You may obtain a copy of the License at
10+#
11+# http://www.apache.org/licenses/LICENSE-2.0
12+#
13+# Unless required by applicable law or agreed to in writing, software
14+# distributed under the License is distributed on an "AS IS" BASIS,
15+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16+# See the License for the specific language governing permissions and
17+# limitations under the License.
18+
19+# Set Hadoop-specific environment variables here.
20+
21+
22+export JAVA_HOME=/usr/local/jvm/java-6-ora
23+# The directory where pid files are stored. /tmp by default.
24+export HADOOP_PID_DIR=/grid/vol/0/var/run/${USER}
25+# Where log files are stored. $HADOOP_PREFIX/logs by default.
26+#export HADOOP_LOG_DIR=/grid/vol/0/var/log/${USER}
27+# for secure datanode. $USER ('root': Apache, HDP; '': CDH)
28+if [ x"$USER" = x'root' -o x"$USER" = x'' ]; then
29+ export HADOOP_LOG_DIR=/grid/vol/0/var/log/hdfs
30+else
31+ export HADOOP_LOG_DIR=/grid/vol/0/var/log/${USER}
32+fi
33+
34+# Extra Java CLASSPATH elements. Optional.
35+if [ x"$HADOOP_CLASSPATH" = x ]; then
36+ export HADOOP_CLASSPATH="/usr/share/java/commons-daemon.jar"
37+ #export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/grid/usr/commons-daemon-1.0.13/commons-daemon-1.0.13.jar
38+else
39+ # for Hive and HCatalog
40+ export HADOOP_CLASSPATH="${HADOOP_CLASSPATH}:/usr/share/java/commons-daemon.jar"
41+ #export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/grid/usr/commons-daemon-1.0.13/commons-daemon-1.0.13.jar
42+fi
43+export HADOOP_USER_CLASSPATH_FIRST=true
44+
45+export HADOOP_SECURE_DN_USER=hdfs
46+# This property is N/A or overridden by the HADOOP_PID_DIR
47+#export HADOOP_SECURE_DN_PID_DIR=/grid/vol/0/var/run/${HADOOP_SECURE_DN_USER}
48+# This property is N/A or overridden by the HADOOP_LOG_DIR
49+#export HADOOP_SECURE_DN_LOG_DIR=/grid/vol/0/var/log/${HADOOP_SECURE_DN_USER}
50+export JSVC_HOME=/usr/bin
51+#export JSVC_HOME=/grid/usr/hadoop/sbin
52+
53+
54+# The only required environment variable is JAVA_HOME. All others are
55+# optional. When running a distributed configuration it is best to
56+# set JAVA_HOME in this file, so that it is correctly defined on
57+# remote nodes.
58+
59+# The java implementation to use.
60+export JAVA_HOME=${JAVA_HOME}
61+
62+# The jsvc implementation to use. Jsvc is required to run secure datanodes.
63+#export JSVC_HOME=${JSVC_HOME}
64+
65+export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-"/etc/hadoop"}
66+
67+# Extra Java CLASSPATH elements. Automatically insert capacity-scheduler.
68+for f in $HADOOP_HOME/contrib/capacity-scheduler/*.jar; do
69+ if [ "$HADOOP_CLASSPATH" ]; then
70+ export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:$f
71+ else
72+ export HADOOP_CLASSPATH=$f
73+ fi
74+done
75+
76+# The maximum amount of heap to use, in MB. Default is 1000.
77+#export HADOOP_HEAPSIZE=
78+#export HADOOP_NAMENODE_INIT_HEAPSIZE=""
79+
80+# Extra Java runtime options. Empty by default.
81+export HADOOP_OPTS="$HADOOP_OPTS -Djava.net.preferIPv4Stack=true"
82+
83+# Command specific options appended to HADOOP_OPTS when specified
84+export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_NAMENODE_OPTS"
85+export HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS $HADOOP_DATANODE_OPTS"
86+
87+export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_SECONDARYNAMENODE_OPTS"
88+
89+# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
90+export HADOOP_CLIENT_OPTS="-Xmx512m $HADOOP_CLIENT_OPTS"
91+#HADOOP_JAVA_PLATFORM_OPTS="-XX:-UsePerfData $HADOOP_JAVA_PLATFORM_OPTS"
92+
93+# On secure datanodes, user to run the datanode as after dropping privileges
94+export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER}
95+
96+# Where log files are stored. $HADOOP_HOME/logs by default.
97+#export HADOOP_LOG_DIR=${HADOOP_LOG_DIR}/$USER
98+
99+# Where log files are stored in the secure data environment.
100+export HADOOP_SECURE_DN_LOG_DIR=${HADOOP_LOG_DIR}/${HADOOP_HDFS_USER}
101+
102+# The directory where pid files are stored. /tmp by default.
103+# NOTE: this should be set to a directory that can only be written to by
104+# the user that will run the hadoop daemons. Otherwise there is the
105+# potential for a symlink attack.
106+export HADOOP_PID_DIR=${HADOOP_PID_DIR}
107+export HADOOP_SECURE_DN_PID_DIR=${HADOOP_PID_DIR}
108+
109+# A string representing this instance of hadoop. $USER by default.
110+export HADOOP_IDENT_STRING=$USER
111+
112+
--- hadoop_conf/tags/localhost-2.2/2.2.0s-1/etc/hadoop/hadoop-policy.xml (nonexistent)
+++ hadoop_conf/tags/localhost-2.2/2.2.0s-1/etc/hadoop/hadoop-policy.xml (revision 569)
@@ -0,0 +1,219 @@
1+<?xml version="1.0"?>
2+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
3+<!--
4+
5+ Copyright 2011 The Apache Software Foundation
6+
7+ Licensed to the Apache Software Foundation (ASF) under one
8+ or more contributor license agreements. See the NOTICE file
9+ distributed with this work for additional information
10+ regarding copyright ownership. The ASF licenses this file
11+ to you under the Apache License, Version 2.0 (the
12+ "License"); you may not use this file except in compliance
13+ with the License. You may obtain a copy of the License at
14+
15+ http://www.apache.org/licenses/LICENSE-2.0
16+
17+ Unless required by applicable law or agreed to in writing, software
18+ distributed under the License is distributed on an "AS IS" BASIS,
19+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
20+ See the License for the specific language governing permissions and
21+ limitations under the License.
22+
23+-->
24+
25+<!-- Put site-specific property overrides in this file. -->
26+
27+<configuration>
28+ <property>
29+ <name>security.client.protocol.acl</name>
30+ <value>*</value>
31+ <description>ACL for ClientProtocol, which is used by user code
32+ via the DistributedFileSystem.
33+ The ACL is a comma-separated list of user and group names. The user and
34+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
35+ A special value of "*" means all users are allowed.</description>
36+ </property>
37+
38+ <property>
39+ <name>security.client.datanode.protocol.acl</name>
40+ <value>*</value>
41+ <description>ACL for ClientDatanodeProtocol, the client-to-datanode protocol
42+ for block recovery.
43+ The ACL is a comma-separated list of user and group names. The user and
44+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
45+ A special value of "*" means all users are allowed.</description>
46+ </property>
47+
48+ <property>
49+ <name>security.datanode.protocol.acl</name>
50+ <value>*</value>
51+ <description>ACL for DatanodeProtocol, which is used by datanodes to
52+ communicate with the namenode.
53+ The ACL is a comma-separated list of user and group names. The user and
54+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
55+ A special value of "*" means all users are allowed.</description>
56+ </property>
57+
58+ <property>
59+ <name>security.inter.datanode.protocol.acl</name>
60+ <value>*</value>
61+ <description>ACL for InterDatanodeProtocol, the inter-datanode protocol
62+ for updating generation timestamp.
63+ The ACL is a comma-separated list of user and group names. The user and
64+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
65+ A special value of "*" means all users are allowed.</description>
66+ </property>
67+
68+ <property>
69+ <name>security.namenode.protocol.acl</name>
70+ <value>*</value>
71+ <description>ACL for NamenodeProtocol, the protocol used by the secondary
72+ namenode to communicate with the namenode.
73+ The ACL is a comma-separated list of user and group names. The user and
74+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
75+ A special value of "*" means all users are allowed.</description>
76+ </property>
77+
78+ <property>
79+ <name>security.admin.operations.protocol.acl</name>
80+ <value>hdfs,mapred hadoop</value>
81+ <description>ACL for AdminOperationsProtocol. Used for admin commands.
82+ The ACL is a comma-separated list of user and group names. The user and
83+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
84+ A special value of "*" means all users are allowed.</description>
85+ </property>
86+
87+ <property>
88+ <name>security.refresh.usertogroups.mappings.protocol.acl</name>
89+ <value>hdfs hadoop</value>
90+ <description>ACL for RefreshUserMappingsProtocol. Used to refresh
91+ users mappings. The ACL is a comma-separated list of user and
92+ group names. The user and group list is separated by a blank. For
93+ e.g. "alice,bob users,wheel". A special value of "*" means all
94+ users are allowed.</description>
95+ </property>
96+
97+ <property>
98+ <name>security.refresh.policy.protocol.acl</name>
99+ <value>hdfs,mapred hadoop</value>
100+ <description>ACL for RefreshAuthorizationPolicyProtocol, used by the
101+ dfsadmin and mradmin commands to refresh the security policy in-effect.
102+ The ACL is a comma-separated list of user and group names. The user and
103+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
104+ A special value of "*" means all users are allowed.</description>
105+ </property>
106+
107+ <property>
108+ <name>security.ha.service.protocol.acl</name>
109+ <value>hdfs hadoop</value>
110+ <description>ACL for HAService protocol used by HAAdmin to manage the
111+ active and stand-by states of namenode.</description>
112+ </property>
113+
114+ <property>
115+ <name>security.zkfc.protocol.acl</name>
116+ <value>hdfs hadoop</value>
117+ <description>ACL for access to the ZK Failover Controller
118+ </description>
119+ </property>
120+
121+ <property>
122+ <name>security.qjournal.service.protocol.acl</name>
123+ <value>hdfs</value>
124+ <description>ACL for QJournalProtocol, used by the NN to communicate with
125+ JNs when using the QuorumJournalManager for edit logs.</description>
126+ </property>
127+
128+ <property>
129+ <name>security.mrhs.client.protocol.acl</name>
130+ <value>*</value>
131+ <description>ACL for HSClientProtocol, used by job clients to
132+ communicate with the MR History Server to query job status etc.
133+ The ACL is a comma-separated list of user and group names. The user and
134+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
135+ A special value of "*" means all users are allowed.</description>
136+ </property>
137+
138+ <!-- YARN Protocols -->
139+
140+ <property>
141+ <name>security.resourcetracker.protocol.acl</name>
142+ <value>yarn</value>
143+ <description>ACL for ResourceTrackerProtocol, used by the
144+ ResourceManager and NodeManager to communicate with each other.
145+ The ACL is a comma-separated list of user and group names. The user and
146+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
147+ A special value of "*" means all users are allowed.</description>
148+ </property>
149+
150+ <property>
151+ <name>security.resourcemanager-administration.protocol.acl</name>
152+ <value>yarn hadoop</value>
153+ <description>ACL for ResourceManagerAdministrationProtocol, for admin commands.
154+ The ACL is a comma-separated list of user and group names. The user and
155+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
156+ A special value of "*" means all users are allowed.</description>
157+ </property>
158+
159+ <property>
160+ <name>security.applicationclient.protocol.acl</name>
161+ <value>*</value>
162+ <description>ACL for ApplicationClientProtocol, used by the ResourceManager
163+ and applications submission clients to communicate with each other.
164+ The ACL is a comma-separated list of user and group names. The user and
165+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
166+ A special value of "*" means all users are allowed.</description>
167+ </property>
168+
169+ <property>
170+ <name>security.applicationmaster.protocol.acl</name>
171+ <value>*</value>
172+ <description>ACL for ApplicationMasterProtocol, used by the ResourceManager
173+ and ApplicationMasters to communicate with each other.
174+ The ACL is a comma-separated list of user and group names. The user and
175+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
176+ A special value of "*" means all users are allowed.</description>
177+ </property>
178+
179+ <property>
180+ <name>security.containermanagement.protocol.acl</name>
181+ <value>*</value>
182+ <description>ACL for ContainerManagementProtocol protocol, used by the NodeManager
183+ and ApplicationMasters to communicate with each other.
184+ The ACL is a comma-separated list of user and group names. The user and
185+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
186+ A special value of "*" means all users are allowed.</description>
187+ </property>
188+
189+ <property>
190+ <name>security.resourcelocalizer.protocol.acl</name>
191+ <value>*</value>
192+ <description>ACL for ResourceLocalizer protocol, used by the NodeManager
193+ and ResourceLocalizer to communicate with each other.
194+ The ACL is a comma-separated list of user and group names. The user and
195+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
196+ A special value of "*" means all users are allowed.</description>
197+ </property>
198+
199+ <property>
200+ <name>security.job.task.protocol.acl</name>
201+ <value>*</value>
202+ <description>ACL for TaskUmbilicalProtocol, used by the map and reduce
203+ tasks to communicate with the parent tasktracker.
204+ The ACL is a comma-separated list of user and group names. The user and
205+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
206+ A special value of "*" means all users are allowed.</description>
207+ </property>
208+
209+ <property>
210+ <name>security.job.client.protocol.acl</name>
211+ <value>*</value>
212+ <description>ACL for MRClientProtocol, used by job clients to
213+ communicate with the MR ApplicationMaster to query job status etc.
214+ The ACL is a comma-separated list of user and group names. The user and
215+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
216+ A special value of "*" means all users are allowed.</description>
217+ </property>
218+
219+</configuration>
--- hadoop_conf/tags/localhost-2.2/2.2.0s-1/etc/hadoop/httpfs-env.sh (nonexistent)
+++ hadoop_conf/tags/localhost-2.2/2.2.0s-1/etc/hadoop/httpfs-env.sh (revision 569)
@@ -0,0 +1,41 @@
1+#!/bin/bash
2+#
3+# Licensed under the Apache License, Version 2.0 (the "License");
4+# you may not use this file except in compliance with the License.
5+# You may obtain a copy of the License at
6+#
7+# http://www.apache.org/licenses/LICENSE-2.0
8+#
9+# Unless required by applicable law or agreed to in writing, software
10+# distributed under the License is distributed on an "AS IS" BASIS,
11+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+# See the License for the specific language governing permissions and
13+# limitations under the License. See accompanying LICENSE file.
14+#
15+
16+# Set httpfs specific environment variables here.
17+
18+# Settings for the Embedded Tomcat that runs HttpFS
19+# Java System properties for HttpFS should be specified in this variable
20+#
21+# export CATALINA_OPTS=
22+
23+# HttpFS logs directory
24+#
25+# export HTTPFS_LOG=${HTTPFS_HOME}/logs
26+
27+# HttpFS temporary directory
28+#
29+# export HTTPFS_TEMP=${HTTPFS_HOME}/temp
30+
31+# The HTTP port used by HttpFS
32+#
33+# export HTTPFS_HTTP_PORT=14000
34+
35+# The Admin port used by HttpFS
36+#
37+# export HTTPFS_ADMIN_PORT=`expr ${HTTPFS_HTTP_PORT} + 1`
38+
39+# The hostname HttpFS server runs on
40+#
41+# export HTTPFS_HTTP_HOSTNAME=`hostname -f`
--- hadoop_conf/tags/localhost-2.2/2.2.0s-1/etc/hadoop/mapred-env.sh (nonexistent)
+++ hadoop_conf/tags/localhost-2.2/2.2.0s-1/etc/hadoop/mapred-env.sh (revision 569)
@@ -0,0 +1,37 @@
1+# Licensed to the Apache Software Foundation (ASF) under one or more
2+# contributor license agreements. See the NOTICE file distributed with
3+# this work for additional information regarding copyright ownership.
4+# The ASF licenses this file to You under the Apache License, Version 2.0
5+# (the "License"); you may not use this file except in compliance with
6+# the License. You may obtain a copy of the License at
7+#
8+# http://www.apache.org/licenses/LICENSE-2.0
9+#
10+# Unless required by applicable law or agreed to in writing, software
11+# distributed under the License is distributed on an "AS IS" BASIS,
12+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13+# See the License for the specific language governing permissions and
14+# limitations under the License.
15+
16+
17+export JAVA_HOME=/usr/local/jvm/java-6-ora
18+
19+# The directory where pid files are stored. /tmp by default.
20+export HADOOP_MAPRED_PID_DIR=/grid/vol/0/var/run/${USER}
21+# Where log files are stored. $HADOOP_MAPRED_HOME/logs by default.
22+export HADOOP_MAPRED_LOG_DIR=/grid/vol/0/var/log/${USER}
23+
24+
25+# export JAVA_HOME=/home/y/libexec/jdk1.6.0/
26+
27+export HADOOP_JOB_HISTORYSERVER_HEAPSIZE=1000
28+
29+export HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA
30+
31+#export HADOOP_JOB_HISTORYSERVER_OPTS=
32+#export HADOOP_MAPRED_LOG_DIR="" # Where log files are stored. $HADOOP_MAPRED_HOME/logs by default.
33+#export HADOOP_JHS_LOGGER=INFO,RFA # Hadoop JobSummary logger.
34+#export HADOOP_MAPRED_PID_DIR= # The pid files are stored. /tmp by default.
35+#export HADOOP_MAPRED_IDENT_STRING= #A string representing this instance of hadoop. $USER by default
36+#export HADOOP_MAPRED_NICENESS= #The scheduling priority for daemons. Defaults to 0.
37+
--- hadoop_conf/tags/localhost-2.2/2.2.0s-1/etc/hadoop/capacity-scheduler.xml (nonexistent)
+++ hadoop_conf/tags/localhost-2.2/2.2.0s-1/etc/hadoop/capacity-scheduler.xml (revision 569)
@@ -0,0 +1,127 @@
1+<!--
2+ Licensed under the Apache License, Version 2.0 (the "License");
3+ you may not use this file except in compliance with the License.
4+ You may obtain a copy of the License at
5+
6+ http://www.apache.org/licenses/LICENSE-2.0
7+
8+ Unless required by applicable law or agreed to in writing, software
9+ distributed under the License is distributed on an "AS IS" BASIS,
10+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11+ See the License for the specific language governing permissions and
12+ limitations under the License. See accompanying LICENSE file.
13+-->
14+<configuration>
15+
16+ <property>
17+ <name>yarn.scheduler.capacity.maximum-applications</name>
18+ <value>10000</value>
19+ <description>
20+ Maximum number of applications that can be pending and running.
21+ </description>
22+ </property>
23+
24+ <property>
25+ <name>yarn.scheduler.capacity.maximum-am-resource-percent</name>
26+ <value>0.1</value>
27+ <description>
28+ Maximum percent of resources in the cluster which can be used to run
29+ application masters i.e. controls number of concurrent running
30+ applications.
31+ </description>
32+ </property>
33+
34+ <property>
35+ <name>yarn.scheduler.capacity.resource-calculator</name>
36+ <value>org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator</value>
37+ <description>
38+ The ResourceCalculator implementation to be used to compare
39+ Resources in the scheduler.
40+ The default i.e. DefaultResourceCalculator only uses Memory while
41+ DominantResourceCalculator uses dominant-resource to compare
42+ multi-dimensional resources such as Memory, CPU etc.
43+ </description>
44+ </property>
45+
46+ <property>
47+ <name>yarn.scheduler.capacity.root.queues</name>
48+ <value>default</value>
49+ <description>
50+ The queues at this level (root is the root queue).
51+ </description>
52+ </property>
53+
54+ <property>
55+ <name>yarn.scheduler.capacity.root.acl_submit_applications</name>
56+ <value> </value>
57+ <description>
58+ The ACL of who can submit jobs to the root queue.
59+ </description>
60+ </property>
61+
62+ <property>
63+ <name>yarn.scheduler.capacity.root.acl_administer_queue</name>
64+ <value> hadoop,gridops</value>
65+ <description>
66+ The ACL of who can administer jobs on the root queue.
67+ </description>
68+ </property>
69+
70+ <property>
71+ <name>yarn.scheduler.capacity.root.default.capacity</name>
72+ <value>100</value>
73+ <description>Default queue target capacity.</description>
74+ </property>
75+
76+ <property>
77+ <name>yarn.scheduler.capacity.root.default.user-limit-factor</name>
78+ <value>1</value>
79+ <description>
80+ Default queue user limit a percentage from 0.0 to 1.0.
81+ </description>
82+ </property>
83+
84+ <property>
85+ <name>yarn.scheduler.capacity.root.default.maximum-capacity</name>
86+ <value>100</value>
87+ <description>
88+ The maximum capacity of the default queue.
89+ </description>
90+ </property>
91+
92+ <property>
93+ <name>yarn.scheduler.capacity.root.default.state</name>
94+ <value>RUNNING</value>
95+ <description>
96+ The state of the default queue. State can be one of RUNNING or STOPPED.
97+ </description>
98+ </property>
99+
100+ <property>
101+ <name>yarn.scheduler.capacity.root.default.acl_submit_applications</name>
102+ <value>*</value>
103+ <description>
104+ The ACL of who can submit jobs to the default queue.
105+ </description>
106+ </property>
107+
108+ <property>
109+ <name>yarn.scheduler.capacity.root.default.acl_administer_queue</name>
110+ <value> hadoop,gridops</value>
111+ <description>
112+ The ACL of who can administer jobs on the default queue.
113+ </description>
114+ </property>
115+
116+ <property>
117+ <name>yarn.scheduler.capacity.node-locality-delay</name>
118+ <value>-1</value>
119+ <description>
120+ Number of missed scheduling opportunities after which the CapacityScheduler
121+ attempts to schedule rack-local containers.
122+ Typically this should be set to number of racks in the cluster, this
123+ feature is disabled by default, set to -1.
124+ </description>
125+ </property>
126+
127+</configuration>
--- hadoop_conf/tags/localhost-2.2/2.2.0s-1/etc/hadoop/yarn-env.sh (nonexistent)
+++ hadoop_conf/tags/localhost-2.2/2.2.0s-1/etc/hadoop/yarn-env.sh (revision 569)
@@ -0,0 +1,125 @@
1+# Licensed to the Apache Software Foundation (ASF) under one or more
2+# contributor license agreements. See the NOTICE file distributed with
3+# this work for additional information regarding copyright ownership.
4+# The ASF licenses this file to You under the Apache License, Version 2.0
5+# (the "License"); you may not use this file except in compliance with
6+# the License. You may obtain a copy of the License at
7+#
8+# http://www.apache.org/licenses/LICENSE-2.0
9+#
10+# Unless required by applicable law or agreed to in writing, software
11+# distributed under the License is distributed on an "AS IS" BASIS,
12+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13+# See the License for the specific language governing permissions and
14+# limitations under the License.
15+
16+
17+export JAVA_HOME=/usr/local/jvm/java-6-ora
18+
19+export YARN_LOG_DIR=/grid/vol/0/var/log/${USER}
20+export YARN_PID_DIR=/grid/vol/0/var/run/${USER}
21+export YARN_HOME=${HADOOP_PREFIX}
22+
23+export HADOOP_PREFIX=/grid/usr/hadoop
24+export HADOOP_COMMON_HOME=${HADOOP_PREFIX}
25+export HADOOP_HDFS_HOME=${HADOOP_PREFIX}
26+export HADOOP_MAPRED_HOME=${HADOOP_PREFIX}
27+
28+
29+# User for YARN daemons
30+export HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn}
31+
32+# resolve links - $0 may be a softlink
33+export YARN_CONF_DIR="${YARN_CONF_DIR:-$HADOOP_YARN_HOME/conf}"
34+
35+# some Java parameters
36+# export JAVA_HOME=/home/y/libexec/jdk1.6.0/
37+if [ "$JAVA_HOME" != "" ]; then
38+ #echo "run java in $JAVA_HOME"
39+ JAVA_HOME=$JAVA_HOME
40+fi
41+
42+if [ "$JAVA_HOME" = "" ]; then
43+ echo "Error: JAVA_HOME is not set."
44+ exit 1
45+fi
46+
47+JAVA=$JAVA_HOME/bin/java
48+JAVA_HEAP_MAX=-Xmx1000m
49+
50+# For setting YARN specific HEAP sizes please use this
51+# Parameter and set appropriately
52+# YARN_HEAPSIZE=1000
53+
54+# check envvars which might override default args
55+if [ "$YARN_HEAPSIZE" != "" ]; then
56+ JAVA_HEAP_MAX="-Xmx""$YARN_HEAPSIZE""m"
57+fi
58+
59+# Resource Manager specific parameters
60+
61+# Specify the max Heapsize for the ResourceManager using a numerical value
62+# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set
63+# the value to 1000.
64+# This value will be overridden by an Xmx setting specified in either YARN_OPTS
65+# and/or YARN_RESOURCEMANAGER_OPTS.
66+# If not specified, the default value will be picked from either YARN_HEAPMAX
67+# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
68+#export YARN_RESOURCEMANAGER_HEAPSIZE=1000
69+
70+# Specify the JVM options to be used when starting the ResourceManager.
71+# These options will be appended to the options specified as YARN_OPTS
72+# and therefore may override any similar flags set in YARN_OPTS
73+#export YARN_RESOURCEMANAGER_OPTS=
74+
75+# Node Manager specific parameters
76+
77+# Specify the max Heapsize for the NodeManager using a numerical value
78+# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set
79+# the value to 1000.
80+# This value will be overridden by an Xmx setting specified in either YARN_OPTS
81+# and/or YARN_NODEMANAGER_OPTS.
82+# If not specified, the default value will be picked from either YARN_HEAPMAX
83+# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
84+#export YARN_NODEMANAGER_HEAPSIZE=1000
85+
86+# Specify the JVM options to be used when starting the NodeManager.
87+# These options will be appended to the options specified as YARN_OPTS
88+# and therefore may override any similar flags set in YARN_OPTS
89+#export YARN_NODEMANAGER_OPTS=
90+
91+# so that filenames w/ spaces are handled correctly in loops below
92+IFS=
93+
94+
95+# default log directory & file
96+if [ "$YARN_LOG_DIR" = "" ]; then
97+ YARN_LOG_DIR="$HADOOP_YARN_HOME/logs"
98+fi
99+if [ "$YARN_LOGFILE" = "" ]; then
100+ YARN_LOGFILE='yarn.log'
101+fi
102+
103+# default policy file for service-level authorization
104+if [ "$YARN_POLICYFILE" = "" ]; then
105+ YARN_POLICYFILE="hadoop-policy.xml"
106+fi
107+
108+# restore ordinary behaviour
109+unset IFS
110+
111+
112+YARN_OPTS="$YARN_OPTS -Dhadoop.log.dir=$YARN_LOG_DIR"
113+YARN_OPTS="$YARN_OPTS -Dyarn.log.dir=$YARN_LOG_DIR"
114+YARN_OPTS="$YARN_OPTS -Dhadoop.log.file=$YARN_LOGFILE"
115+YARN_OPTS="$YARN_OPTS -Dyarn.log.file=$YARN_LOGFILE"
116+YARN_OPTS="$YARN_OPTS -Dyarn.home.dir=$YARN_COMMON_HOME"
117+YARN_OPTS="$YARN_OPTS -Dyarn.id.str=$YARN_IDENT_STRING"
118+YARN_OPTS="$YARN_OPTS -Dhadoop.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
119+YARN_OPTS="$YARN_OPTS -Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
120+if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
121+ YARN_OPTS="$YARN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH"
122+fi
123+YARN_OPTS="$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE"
124+
125+
--- hadoop_conf/tags/localhost-2.2/2.2.0s-1/etc/hadoop/httpfs-site.xml (nonexistent)
+++ hadoop_conf/tags/localhost-2.2/2.2.0s-1/etc/hadoop/httpfs-site.xml (revision 569)
@@ -0,0 +1,17 @@
1+<?xml version="1.0" encoding="UTF-8"?>
2+<!--
3+ Licensed under the Apache License, Version 2.0 (the "License");
4+ you may not use this file except in compliance with the License.
5+ You may obtain a copy of the License at
6+
7+ http://www.apache.org/licenses/LICENSE-2.0
8+
9+ Unless required by applicable law or agreed to in writing, software
10+ distributed under the License is distributed on an "AS IS" BASIS,
11+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+ See the License for the specific language governing permissions and
13+ limitations under the License.
14+-->
15+<configuration>
16+
17+</configuration>
--- hadoop_conf/tags/localhost-2.2/2.2.0s-1/etc/hadoop/configuration.xsl (nonexistent)
+++ hadoop_conf/tags/localhost-2.2/2.2.0s-1/etc/hadoop/configuration.xsl (revision 569)
@@ -0,0 +1,40 @@
1+<?xml version="1.0"?>
2+<!--
3+ Licensed to the Apache Software Foundation (ASF) under one or more
4+ contributor license agreements. See the NOTICE file distributed with
5+ this work for additional information regarding copyright ownership.
6+ The ASF licenses this file to You under the Apache License, Version 2.0
7+ (the "License"); you may not use this file except in compliance with
8+ the License. You may obtain a copy of the License at
9+
10+ http://www.apache.org/licenses/LICENSE-2.0
11+
12+ Unless required by applicable law or agreed to in writing, software
13+ distributed under the License is distributed on an "AS IS" BASIS,
14+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15+ See the License for the specific language governing permissions and
16+ limitations under the License.
17+-->
18+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
19+<xsl:output method="html"/>
20+<xsl:template match="configuration">
21+<html>
22+<body>
23+<table border="1">
24+<tr>
25+ <td>name</td>
26+ <td>value</td>
27+ <td>description</td>
28+</tr>
29+<xsl:for-each select="property">
30+<tr>
31+ <td><a name="{name}"><xsl:value-of select="name"/></a></td>
32+ <td><xsl:value-of select="value"/></td>
33+ <td><xsl:value-of select="description"/></td>
34+</tr>
35+</xsl:for-each>
36+</table>
37+</body>
38+</html>
39+</xsl:template>
40+</xsl:stylesheet>
Show on old repository browser