--- /dev/null
+# See the following page for extensive details on setting
+# up the JVM to accept JMX remote management:
+# https://docs.oracle.com/javase/8/docs/technotes/guides/management/agent.html
+# by default we allow local JMX connections
+#JMXLOCALONLY=false
+
+#JAVA_OPTS=""
+
+# Make sure to include the original classpath since it contains the zookeeper
+# jars
+#CLASSPATH="$CLASSPATH:YOURCLASSPATH"
--- /dev/null
+description "zookeeper centralized coordination service"
+
+start on runlevel [2345]
+stop on runlevel [!2345]
+
+respawn
+
+limit nofile 8192 8192
+
+pre-start script
+ [ -r "/usr/share/java/zookeeper.jar" ] || exit 0
+ [ -r "/etc/zookeeper/conf/environment" ] || exit 0
+ . /etc/zookeeper/conf/environment
+ [ -d $ZOO_LOG_DIR ] || mkdir -p $ZOO_LOG_DIR
+ chown $USER:$GROUP $ZOO_LOG_DIR
+end script
+
+script
+ . /etc/zookeeper/conf/environment
+ [ -r /etc/default/zookeeper ] && . /etc/default/zookeeper
+ if [ -z "$JMXDISABLE" ]; then
+ JAVA_OPTS="$JAVA_OPTS -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.local.only=$JMXLOCALONLY"
+ fi
+ exec start-stop-daemon --start -c $USER --exec $JAVA --name zookeeper \
+ -- -cp $CLASSPATH $JAVA_OPTS -Dzookeeper.log.dir=${ZOO_LOG_DIR} \
+ -Dzookeeper.root.logger=${ZOO_LOG4J_PROP} $ZOOMAIN $ZOOCFG
+end script
Vagrant.configure(2) do |config|
# Define and configure SolrCloud cluster
cloudservers = {
- "node1" => "192.168.2.4",
- "node2" => "192.168.2.5",
- "node3" => "192.168.2.6"
+ "solr1" => "192.168.2.4",
+ "solr2" => "192.168.2.5"
}
cloudservers.each do |server_name, server_ip|
cloudconfig.vm.box = "debian/contrib-jessie64"
cloudconfig.vm.host_name = server_name.to_s
cloudconfig.vm.network "private_network", ip: server_ip
+ cloudconfig.vm.provision "ansible" do |ansible|
+ ansible.playbook = "solr.yml"
+ end
end
end
zookeeper.vm.box = "debian/contrib-jessie64"
zookeeper.vm.host_name = "zookeeper"
zookeeper.vm.network "private_network", ip: "192.168.2.3"
+ zookeeper.vm.provision "ansible" do |ansible|
+ ansible.playbook = "zookeeper.yml"
+ end
end
# Load balancer node
- config.vm.define "loadbalancer" do |loadbalancer|
- loadbalancer.vm.box = "debian/contrib-jessie64"
- loadbalancer.vm.host_name = "loadbalancer"
- loadbalancer.vm.network "private_network", ip: "192.168.2.2"
- end
+ # config.vm.define "loadbalancer" do |loadbalancer|
+ # loadbalancer.vm.box = "debian/contrib-jessie64"
+ # loadbalancer.vm.host_name = "loadbalancer"
+ # loadbalancer.vm.network "private_network", ip: "192.168.2.2"
+ # end
# Disable the default shared folder, share out project folder as /vagrant for all boxes
config.vm.synced_folder ".", "/vagrant", disabled: true
config.vm.synced_folder "..", "/vagrant"
-
- # config.vm.provision "ansible" do |ansible|
- # ansible.playbook = "dev-deploy.yml"
- # end
+
end
+++ /dev/null
----
-- hosts: default
-
- vars:
-
- tasks:
- - name: Update apt-cache
- become: yes
- apt: update-cache=yes
-
- - name: Install dependencies from apt
- become: yes
- apt: name={{ item }} state=present
- with_items:
- - openjdk-7-jdk
-
- - name: Create deployment environment
- become: yes
- file: path={{ item }} state=directory
- with_items:
- - /usr/share/masterkey/lui
- - /var/lib/masterkey/lui/data/lui
- - /var/log/masterkey/lui
- - /etc/masterkey
-
- - name: Create lui-solr account
- become: yes
- user: name=lui-solr state=present system=yes home=/var/lib/masterkey/lui
-
- - name: Update permissions on deployment directories
- become: yes
- file: path={{ item }} owner=lui-solr recurse=yes
- with_items:
- - /var/lib/masterkey/lui
- - /var/log/masterkey/lui
-
- - name: Link Solr configuration
- become: yes
- file: src=/vagrant/conf path=/etc/masterkey/lui state=link
-
- - name: Link Solr defaults
- become: yes
- file: src=/etc/masterkey/lui/lui-solr.in.sh path=/etc/default/lui-solr.in.sh state=link
-
- - name: Install Solr binary
- become: yes
- command: /vagrant/dist/install_solr_service.sh /vagrant/dist/solr-5.5.1.tgz -d /var/lib/masterkey/lui -i /usr/share/masterkey/lui -s lui-solr -u lui-solr -f creates=/usr/share/masterkey/lui/solr-5.5.1
-
- - name: Remove extra files created by Solr install
- become: yes
- file: path={{ item }} state=absent
- with_items:
- - /var/lib/masterkey/lui/log4j.properties
- - /var/lib/masterkey/lui/logs
- - /var/lib/masterkey/lui/data/solr.xml
--- /dev/null
+---
+- hosts: all
+
+ vars:
+
+ tasks:
+ - name: Update apt-cache
+ become: yes
+      apt: update_cache=yes
+
+ - name: Install dependencies from apt
+ become: yes
+ apt: name={{ item }} state=present
+ with_items:
+ - openjdk-7-jdk
+
+ - name: Create deployment environment
+ become: yes
+ file: path={{ item }} state=directory
+ with_items:
+ - /usr/share/masterkey/lui
+ - /var/lib/masterkey/lui/solr/lui
+ - /var/log/masterkey/lui
+ - /etc/masterkey/lui
+
+ - name: Create lui-solr account
+ become: yes
+ user: name=lui-solr state=present system=yes home=/var/lib/masterkey/lui
+
+ - name: Update permissions on deployment directories
+ become: yes
+ file: path={{ item }} owner=lui-solr recurse=yes
+ with_items:
+ - /var/lib/masterkey/lui
+ - /var/log/masterkey/lui
+
+ - name: Link Solr configuration
+ become: yes
+ file: src=/vagrant/conf/solr path=/etc/masterkey/lui/solr state=link
+
+ - name: Link Solr defaults
+ become: yes
+ file: src=/etc/masterkey/lui/solr/lui-solr.in.sh path=/etc/default/lui-solr.in.sh state=link
+
+ - name: Install Solr binary
+ become: yes
+ command: /vagrant/dist/install_solr_service.sh /vagrant/dist/solr-5.5.1.tgz -d /var/lib/masterkey/lui -i /usr/share/masterkey/lui -s lui-solr -u lui-solr -f creates=/usr/share/masterkey/lui/solr-5.5.1
+
+ - name: Remove extra files created by Solr install
+ become: yes
+ file: path={{ item }} state=absent
+ with_items:
+ - /var/lib/masterkey/lui/log4j.properties
+ - /var/lib/masterkey/lui/logs
+ - /var/lib/masterkey/lui/data/solr.xml
--- /dev/null
+---
+- hosts: all
+
+ vars:
+
+ tasks:
+ - name: Update apt-cache
+ become: yes
+      apt: update_cache=yes
+
+ - name: Install dependencies from apt
+ become: yes
+ apt: name={{ item }} state=present
+ with_items:
+ - openjdk-7-jdk
+
+ - name: Create deployment environment
+ become: yes
+ file: path={{ item }} state=directory
+ with_items:
+ - /usr/share/masterkey/lui
+ - /var/lib/masterkey/lui/zookeeper
+ - /var/log/masterkey/lui
+ - /etc/masterkey/lui
+
+ - name: Create lui-solr account
+ become: yes
+ user: name=lui-solr state=present system=yes home=/var/lib/masterkey/lui
+
+ - name: Update permissions on deployment directories
+ become: yes
+ file: path={{ item }} owner=lui-solr recurse=yes
+ with_items:
+ - /var/lib/masterkey/lui
+ - /var/log/masterkey/lui
+
+ - name: Install Zookeeper
+ become: yes
+ unarchive: src=/vagrant/dist/zookeeper-3.4.6.tar.gz dest=/usr/share/masterkey/lui copy=no creates=/usr/share/masterkey/lui/zookeeper-3.4.6
+
+ - name: Link Zookeeper
+ become: yes
+ file: src=/usr/share/masterkey/lui/zookeeper-3.4.6 path=/usr/share/masterkey/lui/zookeeper state=link
--- /dev/null
+#! /bin/sh
+### BEGIN INIT INFO
+# Provides: zookeeper
+# Required-Start: $remote_fs
+# Required-Stop: $remote_fs
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Short-Description: centralized coordination service
+# Description: ZooKeeper is a centralized service for maintaining
+# configuration information, naming, providing distributed
+# synchronization, and providing group services.
+### END INIT INFO
+
+# Author: Foo Bar <foobar@baz.org>
+#
+# Please remove the "Author" lines above and replace them
+# with your own name if you copy and modify this script.
+
+# Do NOT "set -e"
+
+# Exit if the package is not installed
+# Test that libzookeeper-java is installed
+[ -r "/usr/share/java/zookeeper.jar" ] || exit 0
+# Test that zookeeper is installed and not purged
+[ -r "/etc/zookeeper/conf/environment" ] || exit 0
+. /etc/zookeeper/conf/environment
+
+# PATH should only include /usr/* if it runs after the mountnfs.sh script
+PATH=/sbin:/usr/sbin:/bin:/usr/bin
+DESC="centralized coordination service"
+SCRIPTNAME=/etc/init.d/$NAME
+
+# Read configuration variable file if it is present
+[ -r /etc/default/$NAME ] && . /etc/default/$NAME
+
+# Load the VERBOSE setting and other rcS variables
+. /lib/init/vars.sh
+
+# Define LSB log_* functions.
+# Depend on lsb-base (>= 3.0-6) to ensure that this file is present.
+. /lib/lsb/init-functions
+
+is_running()
+{
+ start-stop-daemon --start --quiet --pidfile $PIDFILE --exec $JAVA --user $USER --test > /dev/null \
+ || return 1
+ return 0
+}
+
+#
+# Function that starts the daemon/service
+#
+do_start()
+{
+ # Return
+ # 0 if daemon has been started
+ # 1 if daemon was already running
+ # 2 if daemon could not be started
+ is_running || return 1
+
+ if [ ! -d $PIDDIR ]
+ then
+ mkdir -p $PIDDIR
+ fi
+ chown $USER:$GROUP $PIDDIR
+
+ if [ ! -d $ZOO_LOG_DIR ]
+ then
+ mkdir -p $ZOO_LOG_DIR
+ fi
+ chown $USER:$GROUP $ZOO_LOG_DIR
+
+ start-stop-daemon --start --quiet \
+ --pidfile $PIDFILE \
+ --make-pidfile \
+ --chuid $USER:$GROUP \
+ --background \
+ --exec $JAVA -- \
+ -cp $CLASSPATH \
+ $JAVA_OPTS \
+ -Dzookeeper.log.dir=${ZOO_LOG_DIR} \
+ -Dzookeeper.root.logger=${ZOO_LOG4J_PROP} \
+ $ZOOMAIN $ZOOCFG \
+ || return 2
+}
+
+#
+# Function that stops the daemon/service
+#
+do_stop()
+{
+ # Return
+ # 0 if daemon has been stopped
+ # 1 if daemon was already stopped
+ # 2 if daemon could not be stopped
+ # other if a failure occurred
+ is_running && return 1
+
+ start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile $PIDFILE
+ RETVAL="$?"
+ [ "$RETVAL" = 2 ] && return 2
+ # Many daemons don't delete their pidfiles when they exit.
+ [ "$RETVAL" = 0 ] && rm -f $PIDFILE
+ return "$RETVAL"
+}
+
+case "$1" in
+ start)
+ if [ "x$JMXDISABLE" = "x" ]
+ then
+ [ "$VERBOSE" != no ] && log_action_msg "$NAME: JMX enabled by default"
+ # for some reason these two options are necessary on jdk6 on Ubuntu
+ # accord to the docs they are not necessary, but otw jconsole cannot
+ # do a local attach
+ JAVA_OPTS="$JAVA_OPTS -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.local.only=$JMXLOCALONLY"
+ else
+ [ "$VERBOSE" != no ] && log_action_msg "$NAME: JMX disabled by user request"
+ fi
+
+ [ "$VERBOSE" != no ] && log_daemon_msg "Starting $DESC" "$NAME"
+ do_start
+ case "$?" in
+ 0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
+ 2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
+ esac
+ ;;
+ stop)
+ [ "$VERBOSE" != no ] && log_daemon_msg "Stopping $DESC" "$NAME"
+ do_stop
+ case "$?" in
+ 0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
+ 2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
+ esac
+ ;;
+ status)
+ status_of_proc -p $PIDFILE "$NAME" "$NAME" && exit 0 || exit $?
+ ;;
+ restart|force-reload)
+ #
+ # If the "reload" option is implemented then remove the
+ # 'force-reload' alias
+ #
+ log_daemon_msg "Restarting $DESC" "$NAME"
+ do_stop
+ case "$?" in
+ 0|1)
+ do_start
+ case "$?" in
+ 0) log_end_msg 0 ;;
+ 1) log_end_msg 1 ;; # Old process is still running
+ *) log_end_msg 1 ;; # Failed to start
+ esac
+ ;;
+ *)
+ # Failed to stop
+ log_end_msg 1
+ ;;
+ esac
+ ;;
+ *)
+ echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
+ exit 3
+ ;;
+esac
+
+: