#!/usr/bin/env bash

# Generate the ssh key pair shared by the cluster nodes (regenerate with -k|--keys).
generate_keys() {
    if [ ! -d ./keys ]; then
        mkdir ./keys
    fi
    if [ ! -f ./keys/id_rsa ] || [ ! -f ./keys/id_rsa.pub ]; then
        rm -rf ./keys/*
        ssh-keygen -b 2048 -t rsa -f ./keys/id_rsa -q -N "" -C "hadoop"
    fi
}
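# Sketch only: the key pair above is presumably copied into each guest so the master
# can ssh to the slaves without a password. The exact wiring lives in Vagrantfile.dst
# and the provisioning scripts, not in this file; a typical guest-side step would be:
#   mkdir -p /home/vagrant/.ssh
#   cat /vagrant/keys/id_rsa.pub >> /home/vagrant/.ssh/authorized_keys
#   chmod 600 /home/vagrant/.ssh/authorized_keys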
# Install the Vagrant plugins this setup relies on.
install_vagrant_plugins() {
    vagrant plugin install vagrant-triggers
    vagrant plugin install vagrant-vbguest
}
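# Note (depends on your Vagrant version): trigger support is built into Vagrant 2.1+,
# where installing the legacy vagrant-triggers plugin can fail or be unnecessary.
# Check what you have before relying on it:
#   vagrant version && vagrant plugin list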
# Download the Hadoop and Spark tarballs into ./resources (skipped if already present).
download_resources() {
    if [ ! -d resources ]; then
        mkdir -p resources
    fi
    if [ ! -f resources/hadoop.tar.gz ]; then
        curl -f0 -o resources/hadoop.tar.gz https://archive.apache.org/dist/hadoop/core/hadoop-2.7.3/hadoop-2.7.3.tar.gz
    else
        echo "hadoop.tar.gz already exists. Delete it to download it again";
    fi
    if [ ! -f resources/spark.tgz ]; then
        curl -f0 -o resources/spark.tgz http://mirrors.gigenet.com/apache/spark/spark-2.2.1/spark-2.2.1-bin-hadoop2.7.tgz
    else
        echo "spark.tgz already exists. Delete it to download it again";
    fi
}
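# Note: the gigenet mirror above is pinned to Spark 2.2.1 and Apache mirrors tend to
# drop old releases. If that URL stops resolving, the Apache archive is the usual
# fallback (URL assumed, not verified here):
#   curl -f0 -o resources/spark.tgz https://archive.apache.org/dist/spark/spark-2.2.1/spark-2.2.1-bin-hadoop2.7.tgz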
# Render Vagrantfile from Vagrantfile.dst by substituting the #placeholder# tokens.
vagrant_gen() {
    if [ -f Vagrantfile ]; then
        echo "Vagrant file already exists. Delete it to generate a new one";
    else
        sed_opt="-i"
        # BSD sed on macOS requires a backup suffix with -i.
        if [ "$(uname)" = "Darwin" ]; then
            sed_opt="-i.bak"
        fi
        sed "s/#nodes#/$NODE_COUNT/g" Vagrantfile.dst > Vagrantfile
        sed $sed_opt "s/#ipaddr#/$IP_ADDR/g" Vagrantfile
        sed $sed_opt "s/#hostname#/$HOST_NAME/g" Vagrantfile
        sed $sed_opt "s/#aws_access_key#/$AWS_ACCESS_KEY_ID/g" Vagrantfile
        sed $sed_opt "s/#aws_secret_key#/$AWS_SECRET_ACCESS_KEY/g" Vagrantfile
        sed $sed_opt "s/#aws_keypair#/$AWS_KEYPAIR_NAME/g" Vagrantfile
        if [ "$(uname)" = "Darwin" ]; then
            rm -rf Vagrantfile.bak
        fi
    fi
}
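# For reference, the placeholders are plain tokens inside Vagrantfile.dst; a template
# line such as the following (illustrative, not the actual template) would come out as
# `node_count = 2` with the defaults below:
#   node_count = #nodes#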
# Generate the Hadoop configuration files and the guest install script from the templates.
hdfs_conf_gen() {
    if [ ! -d hdfs/xml ]; then
        mkdir -p hdfs/xml
    fi
    if [ -f hdfs/xml/core-site.xml ]; then
        echo "core-site.xml already exists. Delete it to generate a new one";
    else
        sed "s/#hostname#/$HOST_NAME/g" hdfs/dst/core-site.xml.dst > hdfs/xml/core-site.xml
    fi
    if [ -f hdfs/xml/hdfs-site.xml ]; then
        echo "hdfs-site.xml already exists. Delete it to generate a new one";
    else
        cat hdfs/dst/hdfs-site.xml.dst > hdfs/xml/hdfs-site.xml
    fi
    if [ -f hdfs/xml/mapred-site.xml ]; then
        echo "mapred-site.xml already exists. Delete it to generate a new one";
    else
        cat hdfs/dst/mapred-site.xml.dst > hdfs/xml/mapred-site.xml
    fi
    if [ -f hdfs/xml/yarn-site.xml ]; then
        echo "yarn-site.xml already exists. Delete it to generate a new one";
    else
        cat hdfs/dst/yarn-site.xml.dst > hdfs/xml/yarn-site.xml
    fi
    if [ -f hdfs/install.sh ]; then
        echo "install.sh already exists. Delete it to generate a new one";
    else
        sed "s/#hostname#/$HOST_NAME/g" hdfs/install.sh.dst > hdfs/install.sh
        chmod +x hdfs/install.sh
    fi
}
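# After this function runs, it has produced:
#   hdfs/xml/core-site.xml    (hostname substituted from -h|--hostname)
#   hdfs/xml/hdfs-site.xml
#   hdfs/xml/mapred-site.xml
#   hdfs/xml/yarn-site.xml
#   hdfs/install.sh           (hostname substituted, made executable)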
# OPTS=`getopt -o ki:t:h:a:b:c: --long keys,ipaddr:,nodes:,hostname:,aws-access-key:,aws-secret-key:,aws-key-pair: $*`
# if [ $? != 0 ] ; then echo "Failed parsing options." >&2 ; exit 1 ; fi
# echo $OPTS
# eval set -- "$OPTS"
# Defaults; each can be overridden by the command-line options parsed below.
NODE_COUNT="2"
HOST_NAME="hadoop-spark.box"
IP_ADDR="192.168.10.10"
AWS_ACCESS_KEY_ID=""
AWS_SECRET_ACCESS_KEY=""
AWS_KEYPAIR_NAME=""
while [[ $# -gt 0 ]]; do
    case $1 in
        -i|--ipaddr)
            if [[ $2 =~ ^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]]; then
                IP_ADDR=$2
            else
                echo "Invalid starting ip, using default"
            fi
            shift 2;
            ;;
        -t|--nodes)
            if [[ $2 =~ ^[0-9]+$ ]]; then
                NODE_COUNT=$2
            else
                echo "Invalid node count, using default"
            fi
            shift 2;
            ;;
        -h|--hostname)
            if [[ $2 =~ ^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$ ]]; then
                HOST_NAME=$2
            else
                echo "Invalid host name, using default"
            fi
            shift 2;
            ;;
        -a|--aws-access-key)
            AWS_ACCESS_KEY_ID=$2;
            shift 2;
            ;;
        -b|--aws-secret-key)
            AWS_SECRET_ACCESS_KEY=$2
            shift 2;
            ;;
        -c|--aws-key-pair)
            AWS_KEYPAIR_NAME=$2
            shift 2;
            ;;
        -k|--keys)
            rm -rf ./keys/*
            shift;
            ;;
        *)  echo "Usage: ./run [-i <start_ip>] [-t <node_count>] [-h <host_name>] [-a <aws_access_key>] [-b <aws_secret_key>] [-c <aws_keypair>] [-k]"
            echo "-i|--ipaddr <start_ip>       ip of the master node; slaves use the next incremented addresses"
            echo "-t|--nodes <node_count>      number of slave nodes to build in addition to the master"
            echo "-h|--hostname <host_name>    hostname of the master node; slaves use an incremented suffix"
            echo "-k|--keys                    regenerate the ssh keys"
            echo "-a|--aws-access-key <key>    AWS generated access key"
            echo "-b|--aws-secret-key <key>    AWS generated secret key"
            echo "-c|--aws-key-pair <keypair>  AWS created ssh key pair name"
            break;
            ;;
    esac
done
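# Example invocation (illustrative): build a master at 192.168.10.10 plus three slave
# nodes, regenerating the ssh keys first:
#   ./run -k -i 192.168.10.10 -t 3 -h hadoop-spark.box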
# Build everything and bring the cluster up.
generate_keys
install_vagrant_plugins
download_resources
vagrant_gen
hdfs_conf_gen
vagrant up