diff --git a/lib/task_helper.rb b/lib/task_helper.rb
index 5d7adb4..3d116b1 100644
--- a/lib/task_helper.rb
+++ b/lib/task_helper.rb
@@ -9,7 +9,15 @@
 def get_inventory_hash(inventory_full_path)
   if File.file?(inventory_full_path)
     inventory_hash_from_inventory_file(inventory_full_path)
   else
-    { 'version' => 2, 'groups' => [{ 'name' => 'docker_nodes', 'targets' => [] }, { 'name' => 'ssh_nodes', 'targets' => [] }, { 'name' => 'winrm_nodes', 'targets' => [] }] }
+    {
+      'version' => 2,
+      'groups' => [
+        { 'name' => 'docker_nodes', 'targets' => [] },
+        { 'name' => 'lxd_nodes', 'targets' => [] },
+        { 'name' => 'ssh_nodes', 'targets' => [] },
+        { 'name' => 'winrm_nodes', 'targets' => [] },
+      ]
+    }
   end
 end
diff --git a/spec/tasks/lxd_spec.rb b/spec/tasks/lxd_spec.rb
new file mode 100644
index 0000000..aaeeb05
--- /dev/null
+++ b/spec/tasks/lxd_spec.rb
@@ -0,0 +1,179 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+require 'webmock/rspec'
+require_relative '../../tasks/lxd'
+require 'yaml'
+
+RSpec::Matchers.define_negated_matcher :not_raise_error, :raise_error
+
+RSpec.shared_context('with tmpdir') do
+  let(:tmpdir) { @tmpdir } # rubocop:disable RSpec/InstanceVariable
+
+  around(:each) do |example|
+    Dir.mktmpdir('rspec-provision_test') do |t|
+      @tmpdir = t
+      example.run
+    end
+  end
+end
+
+describe 'provision::lxd' do
+  let(:lxd) { LXDProvision.new }
+
+  let(:inventory_dir) { "#{tmpdir}/spec/fixtures" }
+  let(:inventory_file) { "#{inventory_dir}/litmus_inventory.yaml" }
+  let(:inventory_hash) { get_inventory_hash(inventory_file) }
+
+  let(:provision_input) do
+    {
+      action: 'provision',
+      platform: 'images:foobar/1',
+      inventory: tmpdir
+    }
+  end
+  let(:tear_down_input) do
+    {
+      action: 'tear_down',
+      node_name: container_id,
+      inventory: tmpdir
+    }
+  end
+
+  let(:lxd_remote) { 'fake' }
+  let(:lxd_flags) { [] }
+  let(:lxd_platform) { nil }
+  let(:container_id) { lxd_init_output }
+  let(:lxd_init_output) { 'random-host' }
+
+  let(:provision_output) do
+    {
+      status: 'ok',
+      node_name: container_id,
+      node: {
+        uri: container_id,
+        config: {
+          transport: 'lxd',
+          lxd: {
+            remote: lxd_remote,
+            'shell-command': 'sh -lc'
+          }
+        },
+        facts: {
+          provisioner: 'lxd',
+          container_id: container_id,
+          platform: lxd_platform
+        }
+      }
+    }
+  end
+
+  let(:tear_down_output) do
+    {
+      status: 'ok',
+      removed: container_id,
+    }
+  end
+
+  include_context('with tmpdir')
+
+  before(:each) do
+    FileUtils.mkdir_p(inventory_dir)
+  end
+
+  describe '.run' do
+    let(:task_input) { {} }
+    let(:imposter) { instance_double('LXDProvision') }
+
+    task_tests = [
+      [ { action: 'provision', platform: 'test' }, 'success', true ],
+      [ { action: 'provision', node_name: 'test' }, 'do not specify node_name', false ],
+      [ { action: 'provision' }, 'platform required', false ],
+      [ { action: 'tear_down', node_name: 'test' }, 'success', true ],
+      [ { action: 'tear_down' }, 'node_name required', false ],
+      [ { action: 'tear_down', platform: 'test' }, 'do not specify platform', false ],
+    ]
+
+    task_tests.each do |v|
+      it "expect arguments '#{v[0]}' return '#{v[1]}'#{v[2] ? '' : ' and raise error'}" do
+        allow(LXDProvision).to receive(:new).and_return(imposter)
+        allow(imposter).to receive(:task).and_return(v[1])
+        allow($stdin).to receive(:read).and_return(v[0].to_json)
+        if v[2]
+          expect { LXDProvision.run }.to output(%r{#{v[1]}}).to_stdout
+        else
+          expect { LXDProvision.run }.to output(%r{#{v[1]}}).to_stdout.and raise_error(SystemExit)
+        end
+      end
+    end
+  end
+
+  describe '.task' do
+    context 'action=provision' do
+      let(:lxd_platform) { provision_input[:platform] }
+
+      before(:each) do
+        expect(lxd).to receive(:run_local_command)
+          .with('lxc -q remote get-default').and_return(lxd_remote)
+        expect(lxd).to receive(:run_local_command)
+          .with("lxc -q init #{lxd_platform} #{lxd_remote}: #{lxd_flags.join(' ')}").and_return(lxd_init_output)
+        expect(lxd).to receive(:run_local_command)
+          .with("lxc -q start #{lxd_remote}:#{container_id}").and_return(lxd_init_output)
+      end
+
+      it 'provisions successfully' do
+        expect(lxd).to receive(:run_local_command)
+          .with("lxc -q exec #{lxd_remote}:#{container_id} uptime")
+
+        LXDProvision.new.add_node_to_group(inventory_hash, JSON.parse(provision_output[:node].to_json), 'lxd_nodes')
+
+        expect(File).to receive(:write).with(inventory_file, JSON.parse(inventory_hash.to_json).to_yaml)
+        expect(lxd.task(**provision_input)).to eq(provision_output)
+      end
+
+      it 'when retries=0 try once but ignore the raised error' do
+        provision_input[:retries] = 0
+
+        expect(lxd).to receive(:run_local_command)
+          .with("lxc -q exec #{lxd_remote}:#{container_id} uptime").and_raise(StandardError)
+
+        expect(lxd.task(**provision_input)).to eq(provision_output)
+      end
+
+      it 'max retries then deletes the instance' do
+        expect(lxd).to receive(:run_local_command)
+          .exactly(3).times
+          .with("lxc -q exec #{lxd_remote}:#{container_id} uptime").and_raise(StandardError)
+        expect(lxd).to receive(:run_local_command)
+          .with("lxc -q delete #{lxd_remote}:#{container_id} -f")
+
+        expect { lxd.task(**provision_input) }.to raise_error(StandardError, %r{Giving up waiting for #{lxd_remote}:#{container_id}})
+      end
+    end
+
+    context 'action=tear_down' do
+      before(:each) do
+        File.write(inventory_file, JSON.parse(inventory_hash.to_json).to_yaml)
+      end
+
+      it 'tears down successfully' do
+        expect(lxd).to receive(:run_local_command)
+          .with("lxc -q delete #{lxd_remote}:#{container_id} -f")
+
+        LXDProvision.new.add_node_to_group(inventory_hash, JSON.parse(provision_output[:node].to_json), 'lxd_nodes')
+        File.write(inventory_file, inventory_hash.to_yaml)
+
+        expect(lxd.task(**tear_down_input)).to eq(tear_down_output)
+      end
+
+      it 'expect to raise error if no inventory' do
+        File.delete(inventory_file)
+        expect { lxd.task(**tear_down_input) }.to raise_error(StandardError, %r{Unable to find})
+      end
+
+      it 'expect to raise error if node_name not in inventory' do
+        expect { lxd.task(**tear_down_input) }.to raise_error(StandardError, %r{node_name #{container_id} not found in inventory})
+      end
+    end
+  end
+end
diff --git a/tasks/lxd.json b/tasks/lxd.json
new file mode 100644
index 0000000..15ea7e7
--- /dev/null
+++ b/tasks/lxd.json
@@ -0,0 +1,56 @@
+{
+  "puppet_task_version": 1,
+  "supports_noop": false,
+  "description": "Provision/Tear down an instance on LXD",
+  "parameters": {
+    "action": {
+      "description": "Action to perform, tear_down or provision",
+      "type": "Enum[provision, tear_down]",
+      "default": "provision"
+    },
+    "inventory": {
+      "description": "Location of the inventory file",
+      "type": "Optional[String[1]]"
+    },
+    "node_name": {
+      "description": "The name of the instance",
+      "type": "Optional[String[1]]"
+    },
+    "platform": {
+      "description": "LXD image to use, eg images:ubuntu/22.04",
+      "type": "Optional[String[1]]"
+    },
+    "profiles": {
+      "description": "LXD Profiles to apply",
+      "type": "Optional[Array[String[1]]]"
+    },
+    "storage": {
+      "description": "LXD Storage pool name",
+      "type": "Optional[String[1]]"
+    },
+    "instance_type": {
+      "description": "LXD Instance type",
+      "type": "Optional[String[1]]"
+    },
+    "vm": {
+      "description": "Provision as a virtual-machine instead of a container",
+      "type": "Optional[Boolean]"
+    },
+    "remote": {
+      "description": "LXD remote, defaults to the LXD client configured default remote",
+      "type": "Optional[String]"
+    },
+    "retries": {
+      "description": "On provision check the instance is accepting commands, will be deleted if retries exceeded, 0 to disable",
+      "type": "Integer",
+      "default": 5
+    },
+    "vars": {
+      "description": "YAML string of key/value pairs to add to the inventory vars section",
+      "type": "Optional[String[1]]"
+    }
+  },
+  "files": [
+    "provision/lib/task_helper.rb"
+  ]
+}
diff --git a/tasks/lxd.rb b/tasks/lxd.rb
new file mode 100755
index 0000000..adca52d
--- /dev/null
+++ b/tasks/lxd.rb
@@ -0,0 +1,154 @@
+#!/usr/bin/env ruby
+# frozen_string_literal: true
+
+require 'json'
+require 'yaml'
+require 'puppet_litmus'
+require_relative '../lib/task_helper'
+
+# Provision and teardown instances on LXD
+class LXDProvision
+  include PuppetLitmus::InventoryManipulation
+
+  attr_reader :node_name, :retries
+  attr_reader :platform, :inventory, :inventory_full_path, :vars, :action, :options
+
+  def provision
+    lxd_remote = options[:remote] || lxd_default_remote
+
+    lxd_flags = []
+    options[:profiles]&.each { |p| lxd_flags << "--profile #{p}" }
+    lxd_flags << "--type #{options[:instance_type]}" if options[:instance_type]
+    lxd_flags << "--storage #{options[:storage]}" if options[:storage]
+    lxd_flags << '--vm' if options[:vm]
+
+    creation_command = "lxc -q init #{platform} #{lxd_remote}: #{lxd_flags.join(' ')}"
+    container_id = run_local_command(creation_command).chomp.split[-1]
+
+    begin
+      start_command = "lxc -q start #{lxd_remote}:#{container_id}"
+      run_local_command(start_command)
+
+      # wait here for a bit until instance can accept commands
+      state_command = "lxc -q exec #{lxd_remote}:#{container_id} uptime"
+      attempt = 0
+      begin
+        run_local_command(state_command)
+      rescue StandardError => e
+        raise "Giving up waiting for #{lxd_remote}:#{container_id} to enter running state. Got error: #{e.message}" if retries > 0 && attempt > retries
+
+        attempt += 1
+        sleep 2**attempt
+        retry if retries > 0
+      end
+    rescue StandardError
+      run_local_command("lxc -q delete #{lxd_remote}:#{container_id} -f")
+      raise
+    end
+
+    facts = {
+      provisioner: 'lxd',
+      container_id: container_id,
+      platform: platform
+    }
+
+    options.each do |option|
+      facts[:"lxd_#{option[0]}"] = option[1] unless option[1].to_s.empty?
+    end
+
+    node = {
+      uri: container_id,
+      config: {
+        transport: 'lxd',
+        lxd: {
+          remote: lxd_remote,
+          'shell-command': 'sh -lc'
+        }
+      },
+      facts: facts
+    }
+
+    node[:vars] = vars unless vars.nil?
+
+    add_node_to_group(inventory, node, 'lxd_nodes')
+    save_inventory
+
+    { status: 'ok', node_name: container_id, node: node }
+  end
+
+  def tear_down
+    config = config_from_node(inventory, node_name)
+    node_facts = facts_from_node(inventory, node_name)
+
+    raise "node_name #{node_name} not found in inventory" unless config
+
+    run_local_command("lxc -q delete #{config['lxd']['remote']}:#{node_facts['container_id']} -f")
+
+    remove_node(inventory, node_name)
+    save_inventory
+
+    { status: 'ok', removed: node_name }
+  end
+
+  def save_inventory
+    File.write(inventory_full_path, JSON.parse(inventory.to_json).to_yaml)
+  end
+
+  def task(**params)
+    finalize_params!(params)
+
+    @action = params.delete(:action)
+    @retries = params.delete(:retries)&.to_i || 1
+    @platform = params.delete(:platform)
+    @node_name = params.delete(:node_name)
+    @vars = YAML.safe_load(params.delete(:vars) || '~')
+
+    @inventory_full_path = File.join(sanitise_inventory_location(params.delete(:inventory)), 'spec/fixtures/litmus_inventory.yaml')
+    raise "Unable to find '#{@inventory_full_path}'" unless (action == 'provision') || File.file?(@inventory_full_path)
+
+    @inventory = get_inventory_hash(@inventory_full_path)
+
+    @options = params.reject { |k, _v| k.start_with? '_' }
+    method(action).call
+  end
+
+  def lxd_default_remote
+    @lxd_default_remote ||= run_local_command('lxc -q remote get-default').chomp
+    @lxd_default_remote
+  end
+
+  # add environment provided parameters (puppet litmus); params keys are Symbols (see run)
+  def finalize_params!(params)
+    ['remote', 'profiles', 'storage', 'instance_type', 'vm'].each do |p|
+      params[p.to_sym] = YAML.safe_load(ENV.fetch("LXD_#{p.upcase}", '~')) if params[p.to_sym].to_s.empty?
+    end
+    params.compact!
+  end
+
+  class << self
+    def run
+      params = JSON.parse($stdin.read, symbolize_names: true)
+
+      case params[:action]
+      when 'tear_down'
+        raise 'do not specify platform when tearing down' if params[:platform]
+        raise 'node_name required when tearing down' unless params[:node_name]
+      when 'provision'
+        raise 'do not specify node_name when provisioning' if params[:node_name]
+        raise 'platform required, when provisioning' unless params[:platform]
+      else
+        raise "invalid action: #{params[:action]}" if params[:action]
+
+        raise 'must specify a valid action'
+      end
+
+      result = new.task(**params)
+      puts result.to_json
+    rescue StandardError => e
+      puts({ _error: { kind: 'provision/lxd_failure', msg: e.message, details: { backtraces: e.backtrace } } }.to_json)
+      exit 1
+    end
+  end
+end
+
+LXDProvision.run if __FILE__ == $PROGRAM_NAME