From 0de1c8ee9bf1a5e3624c2583ec4a1605eca79853 Mon Sep 17 00:00:00 2001 From: Jeffrey Clark Date: Thu, 25 Apr 2024 20:43:25 -0500 Subject: [PATCH 1/2] (maint) uncouple from the puppet_litmus gem --- README.md | 24 +++--- lib/docker_helper.rb | 20 ++--- lib/inventory_helper.rb | 119 +++++++++++++++++++++++++++ lib/task_helper.rb | 32 ------- spec/spec_helper_local.rb | 19 +++++ spec/tasks/abs_spec.rb | 11 --- spec/tasks/lxd_spec.rb | 43 +++------- spec/tasks/provision_service_spec.rb | 12 +-- spec/tasks/vagrant_spec.rb | 6 +- spec/unit/docker_helper_spec.rb | 11 +-- spec/unit/inventory_helper_spec.rb | 79 ++++++++++++++++++ tasks/abs.json | 3 +- tasks/abs.rb | 44 +++++----- tasks/docker.json | 3 +- tasks/docker.rb | 19 +++-- tasks/docker_exp.json | 3 +- tasks/docker_exp.rb | 21 ++--- tasks/lxd.json | 3 +- tasks/lxd.rb | 28 ++----- tasks/provision_service.json | 3 +- tasks/provision_service.rb | 50 ++++------- tasks/vagrant.json | 3 +- tasks/vagrant.rb | 40 ++++----- 23 files changed, 347 insertions(+), 249 deletions(-) create mode 100644 lib/inventory_helper.rb create mode 100644 spec/spec_helper_local.rb create mode 100644 spec/unit/inventory_helper_spec.rb diff --git a/README.md b/README.md index e67c8177..fa08c9da 100644 --- a/README.md +++ b/README.md @@ -103,7 +103,7 @@ $ cat ~/.fog ##### Setting up a new machine ```ruby -$ bundle exec bolt --modulepath /Users/tp/workspace/git/ task run provision::abs --targets localhost action=provision platform=ubuntu-1604-x86_64 inventory=/Users/tp/workspace/git/provision/inventory.yaml +$ bundle exec bolt task run provision::abs --targets localhost action=provision platform=ubuntu-1604-x86_64 Started on localhost... Finished on localhost: @@ -118,7 +118,7 @@ Ran on 1 node in 1.44 seconds ##### Tearing down a finished machine ```ruby -$ bundle exec bolt --modulepath /Users/tp/workspace/git/ task run provision::abs --targets localhost action=tear_down inventory=/Users/tp/workspace/git/provision/inventory.yaml node_name=yh6f4djvz7o3te6.delivery.puppetlabs.net +$ bundle exec bolt task run provision::abs --targets localhost action=tear_down node_name=yh6f4djvz7o3te6.delivery.puppetlabs.net Started on localhost... Finished on localhost: @@ -139,7 +139,7 @@ Containers by default will be managed in the current [docker context](https://do #### Provision ```ruby -$ bundle exec bolt --modulepath /Users/tp/workspace/git/ task run provision::docker --targets localhost action=provision platform=ubuntu:14.04 inventory=/Users/tp/workspace/git/provision/inventory.yaml +$ bundle exec bolt task run provision::docker --targets localhost action=provision platform=ubuntu:14.04 Started on localhost... Finished on localhost: @@ -166,13 +166,13 @@ These defaults can be overriden by passing the flags with different values i.e. 
``` ```ruby -bundle exec bolt --modulepath /Users/tp/workspace/git/ task run provision::docker --targets localhost action=provision platform=ubuntu:14.04 inventory=/Users/tp/workspace/git/provision/inventory.yaml vars='{ "docker_run_opts": ["-p 8086:8086", "-p 3000:3000"]}' +bundle exec bolt task run provision::docker --targets localhost action=provision platform=ubuntu:14.04 vars='{ "docker_run_opts": ["-p 8086:8086", "-p 3000:3000"]}' ``` #### Tear down ```ruby -$ bundle exec bolt --modulepath /Users/tp/workspace/git/ task run provision::docker --targets localhost action=tear_down inventory=/Users/tp/workspace/git/provision/inventory.yaml node_name=localhost:2222 +$ bundle exec bolt task run provision::docker --targets localhost action=tear_down node_name=localhost:2222 Started on localhost... Finished on localhost: @@ -197,7 +197,7 @@ Tested with vagrant images: provision ```ruby -$ bundle exec bolt --modulepath /Users/tp/workspace/git/ task run provision::vagrant --targets localhost action=provision platform=ubuntu/xenial64 inventory=/Users/tp/workspace/git/provision/inventory.yaml +$ bundle exec bolt task run provision::vagrant --targets localhost action=provision platform=ubuntu/xenial64 Started on localhost... Finished on localhost: @@ -212,7 +212,7 @@ Ran on 1 node in 51.98 seconds For multi-node provisioning, you can assign arbitrary tags to the nodes you deploy, by passing an optional YAML-string 'vars' to the bolt task. In the example below we are assigning the role of `k8s-controller` to the provisioned node. ```ruby -$ bundle exec bolt --modulepath /Users/tp/workspace/git/ task run provision::vagrant --targets localhost action=provision platform=ubuntu/xenial64 inventory=/Users/tp/workspace/git/provision vars='role: k8s-controller' +$ bundle exec bolt task run provision::vagrant --targets localhost action=provision platform=ubuntu/xenial64 inventory=/Users/tp/workspace/git/provision vars='role: k8s-controller' ``` sudo secure_path fix @@ -223,7 +223,7 @@ This leads to errors when anything tries to execute `puppet` commands on the tes To add the Puppet agent binary path to the *secure_path* please run the `provision::fix_secure_path` Bolt task: ```ruby -$ bundle exec bolt --modulepath /Users/tp/workspace/git/ task run provision::fix_secure_path path=/opt/puppetlabs/bin -i inventory.yaml -t ssh_nodes +$ bundle exec bolt task run provision::fix_secure_path path=/opt/puppetlabs/bin -i inventory.yaml -t ssh_nodes Started on 127.0.0.1:2222... Finished on 127.0.0.1:2222: @@ -235,7 +235,7 @@ Ran on 1 target in 0.84 sec tear_down ```ruby -$ bundle exec bolt --modulepath /Users/tp/workspace/git/ task run provision::vagrant --targets localhost action=tear_down inventory=/Users/tp/workspace/git/provision/inventory.yaml node_name=127.0.0.1:2222 +$ bundle exec bolt task run provision::vagrant --targets localhost action=tear_down node_name=127.0.0.1:2222 Started on localhost... 
Finished on localhost: @@ -290,7 +290,7 @@ In the provision step you can invoke bundle exec rake 'litmus:provision_list[tes Manual invocation of the provision service task from a workflow can be done using: ```ruby -bundle exec bolt --modulepath /Users/tp/workspace/git/ task run provision::provision_service --targets localhost action=provision platform=centos-7-v20200813 inventory=/Users/tp/workspace/git/provision/inventory.yaml vars='role: puppetserver' +bundle exec bolt task run provision::provision_service --targets localhost action=provision platform=centos-7-v20200813 inventory=/Users/tp/workspace/git/provision/inventory.yaml vars='role: puppetserver' ``` Or using Litmus: @@ -326,7 +326,7 @@ provision ```powershell PS> $env:LITMUS_HYPERV_VSWITCH = 'internal_nat' -PS> bundle exec bolt --modulepath /Users/tp/workspace/git/ task run provision::vagrant --targets localhost action=provision platform=centos/7 inventory=/Users/tp/workspace/git/provision/inventory.yaml hyperv_smb_username=tp hyperv_smb_password=notMyrealPassword +PS> bundle exec bolt task run provision::vagrant --targets localhost action=provision platform=centos/7 hyperv_smb_username=tp hyperv_smb_password=notMyrealPassword Started on localhost... Finished on localhost: @@ -359,7 +359,7 @@ Testing/development/debugging it is better to use ruby directly, you will need t Testing using bolt, the second step ```ruby -bundle exec bolt --modulepath /Users/tp/workspace/git/ task run provision::docker --targets localhost action=provision platform=ubuntu:14.04 inventory=/Users/tp/workspace/git/provision/inventory.yaml +bundle exec bolt task run provision::docker --targets localhost action=provision platform=ubuntu:14.04 ``` ## License diff --git a/lib/docker_helper.rb b/lib/docker_helper.rb index d0c020ab..b0c5d403 100644 --- a/lib/docker_helper.rb +++ b/lib/docker_helper.rb @@ -1,10 +1,9 @@ # frozen_string_literal: true require 'json' -require 'puppet_litmus' -def docker_exec(container, command) - run_local_command("docker exec #{container} #{command}") +def docker_exec(container_id, command) + run_local_command("docker exec #{container_id} #{command}") end def docker_image_os_release_facts(image) @@ -34,18 +33,9 @@ def docker_image_os_release_facts(image) os_release_facts end -def docker_tear_down(node_name, inventory_location) - extend PuppetLitmus::InventoryManipulation - inventory_full_path = File.join(inventory_location, '/spec/fixtures/litmus_inventory.yaml') - raise "Unable to find '#{inventory_full_path}'" unless File.file?(inventory_full_path) - - inventory_hash = inventory_hash_from_inventory_file(inventory_full_path) - node_facts = facts_from_node(inventory_hash, node_name) - remove_docker = "docker rm -f #{node_facts['container_id']}" - run_local_command(remove_docker) - remove_node(inventory_hash, node_name) - puts "Removed #{node_name}" - File.open(inventory_full_path, 'w') { |f| f.write inventory_hash.to_yaml } +def docker_tear_down(container_id) + run_local_command("docker rm -f #{container_id}") + puts "Removed #{container_id}" { status: 'ok' } end diff --git a/lib/inventory_helper.rb b/lib/inventory_helper.rb new file mode 100644 index 00000000..86e8a1d5 --- /dev/null +++ b/lib/inventory_helper.rb @@ -0,0 +1,119 @@ +# frozen_string_literal: true + +require 'yaml' +require 'delegate' +require 'json' + +# simple bolt inventory manipulator +class InventoryHelper < SimpleDelegator + def initialize(location) + @location = location + super(refresh) + end + + # Load inventory from location in YAML format + # or generate a 
default structure + # + # @return [Hash] + def refresh + x = JSON.parse(YAML.load_file(@location).to_json) if File.file?(@location) + { 'version' => 2, 'groups' => [] }.merge(x || {}) + end + + # Save inventory to location in yaml format + def save + File.open(@location, 'wb+') { |f| f.write(to_yaml) } + self + end + + # Adds a node to a group specified, if group_name exists in inventory hash. + # + # @param node [Hash] node to add to the group + # @param group [String] group of nodes to limit the search for the node_name in + # @return [Hash] inventory_hash with node added to group if group_name exists in inventory hash. + def add(node, group) + # stringify keys + node = JSON.parse(node.to_json) + # check if group exists + if self['groups'].any? { |g| g['name'] == group } + self['groups'].each do |g| + g['targets'].push(node) if g['name'] == group + end + else + # add new group + self['groups'].push({ 'name' => group, 'targets' => [node] }) + end + + self + end + + # Lookup a node + # + # @param either [String] uri or name of node to find + # @param uri [String] uri of node to find + # @param name [String] name of node to find + # @param group [String] limit search to group + # @return [Hash] inventory target + def lookup(either = nil, uri: nil, name: nil, group: nil) + value = either || uri || name + + keys = [] + keys << 'uri' if uri || either + keys << 'name' if name || either + + self['groups'].each do |g| + next unless (group && group == g['name']) || group.nil? + g['targets'].each do |t| + return t if keys.include? t.key(value) + end + end + + raise "Failed to lookup target #{value}" + end + + # Remove node + # + # @param node [Hash] + # @return [Hash] inventory_hash with node of node_name removed. + def remove(node) + # stringify keys + node = JSON.parse(node.to_json) + self['groups'].map! do |g| + g['targets'].reject! { |target| target == node } + g + end + + self + end + + class << self + attr_accessor :instances + + def open(location = nil) + # Inventory location is an optional task parameter. + location = location.nil? ? Dir.pwd : location + location = if File.directory?(location) + # DEPRECATED: puppet_litmus <= 1.4.0 support + if Gem.loaded_specs['puppet_litmus'] && Gem.loaded_specs['puppet_litmus'].version <= Gem::Version.new('1.4.0') + File.join(location, 'spec', 'fixtures', 'litmus_inventory.yaml') + else + File.join(location, 'inventory.yaml') + end + else + location + end + + raise "directory for storing inventory does not exist: #{location}" unless File.exist?(File.dirname(location)) + + @instances ||= {} + @instances[location] = new(location) unless @instances.key? location + @instances[location] + end + end + + attr_reader :location + + protected + + attr_writer :location +end diff --git a/lib/task_helper.rb b/lib/task_helper.rb index d09d998a..0173e9de 100644 --- a/lib/task_helper.rb +++ b/lib/task_helper.rb @@ -1,37 +1,5 @@ # frozen_string_literal: true -def sanitise_inventory_location(location) - # Inventory location is an optional task parameter. - location = location.nil? ? 
Dir.pwd : location - # If not specified use the current directory + inventory.yaml - if File.exist?(location) && File.directory?(location) - # DEPRECATED: puppet_litmus <= 1.4.0 support - if Gem.loaded_specs['puppet_litmus'].version <= Gem::Version.new('1.4.0') - File.join(location, 'spec', 'fixtures', 'litmus_inventory.yaml') - else - File.join(location, 'inventory.yaml') - end - else - location - end -end - -def get_inventory_hash(inventory_full_path) - if File.file?(inventory_full_path) - inventory_hash_from_inventory_file(inventory_full_path) - else - { - 'version' => 2, - 'groups' => [ - { 'name' => 'docker_nodes', 'targets' => [] }, - { 'name' => 'lxd_nodes', 'targets' => [] }, - { 'name' => 'ssh_nodes', 'targets' => [] }, - { 'name' => 'winrm_nodes', 'targets' => [] }, - ] - } - end -end - def run_local_command(command, dir = Dir.pwd) require 'open3' stdout, stderr, status = Open3.capture3(command, chdir: dir) diff --git a/spec/spec_helper_local.rb b/spec/spec_helper_local.rb new file mode 100644 index 00000000..80c61b7e --- /dev/null +++ b/spec/spec_helper_local.rb @@ -0,0 +1,19 @@ +require 'fileutils' + +RSpec.configure do |rspec| + rspec.expect_with :rspec do |c| + c.max_formatted_output_length = nil + end +end + +RSpec.shared_context('with tmpdir') do + let(:tmpdir) { @tmpdir } # rubocop:disable RSpec/InstanceVariable + + around(:each) do |example| + Dir.mktmpdir('rspec-provision_test') do |t| + FileUtils.mkdir_p(File.join(t, 'spec', 'fixtures')) + @tmpdir = t + example.run + end + end +end diff --git a/spec/tasks/abs_spec.rb b/spec/tasks/abs_spec.rb index b4d37425..bc059dda 100644 --- a/spec/tasks/abs_spec.rb +++ b/spec/tasks/abs_spec.rb @@ -5,17 +5,6 @@ require_relative '../../tasks/abs' require 'yaml' -RSpec.shared_context('with tmpdir') do - let(:tmpdir) { @tmpdir } # rubocop:disable RSpec/InstanceVariable - - around(:each) do |example| - Dir.mktmpdir('rspec-provision_test') do |t| - @tmpdir = t - example.run - end - end -end - describe 'provision::abs' do let(:abs) { ABSProvision.new } let(:inventory_dir) { "#{tmpdir}/spec/fixtures" } diff --git a/spec/tasks/lxd_spec.rb b/spec/tasks/lxd_spec.rb index f8390f31..0c1bc7ca 100644 --- a/spec/tasks/lxd_spec.rb +++ b/spec/tasks/lxd_spec.rb @@ -1,5 +1,6 @@ # frozen_string_literal: true +require 'fileutils' require 'spec_helper' require 'webmock/rspec' require_relative '../../tasks/lxd' @@ -7,23 +8,13 @@ RSpec::Matchers.define_negated_matcher :not_raise_error, :raise_error -RSpec.shared_context('with tmpdir') do - let(:tmpdir) { @tmpdir } # rubocop:disable RSpec/InstanceVariable - - around(:each) do |example| - Dir.mktmpdir('rspec-provision_test') do |t| - @tmpdir = t - example.run - end - end -end - describe 'provision::lxd' do + include_context('with tmpdir') + let(:lxd) { LXDProvision.new } - let(:inventory_dir) { "#{tmpdir}/spec/fixtures" } - let(:inventory_file) { "#{inventory_dir}/litmus_inventory.yaml" } - let(:inventory_hash) { get_inventory_hash(inventory_file) } + let(:inventory_file) { tmpdir } + let(:inventory) { InventoryHelper.open(inventory_file) } let(:provision_input) do { @@ -114,12 +105,6 @@ } end - include_context('with tmpdir') - - before(:each) do - FileUtils.mkdir_p(inventory_dir) - end - describe '.run' do let(:task_input) { {} } let(:imposter) { instance_double('LXDProvision') } @@ -171,9 +156,6 @@ expect(lxd).to receive(:run_local_command) .with("lxc -q exec #{lxd_remote}:#{container_id} uptime") - LXDProvision.new.add_node_to_group(inventory_hash, JSON.parse(provision_output[:node].to_json), 'lxd_nodes') - 
- expect(File).to receive(:write).with(inventory_file, JSON.parse(inventory_hash.to_json).to_yaml) expect(lxd.task(**provision_input)).to eq(provision_output) end @@ -198,27 +180,22 @@ end context 'action=tear_down' do - before(:each) do - File.write(inventory_file, JSON.parse(inventory_hash.to_json).to_yaml) - end - it 'tears down successfully' do + inventory.add(provision_output[:node], 'lxd_nodes').save + expect(lxd).to receive(:run_local_command) .with("lxc -q delete #{lxd_remote}:#{container_id} -f") - LXDProvision.new.add_node_to_group(inventory_hash, JSON.parse(provision_output[:node].to_json), 'lxd_nodes') - File.write(inventory_file, inventory_hash.to_yaml) - expect(lxd.task(**tear_down_input)).to eq(tear_down_output) end it 'expect to raise error if no inventory' do - File.delete(inventory_file) - expect { lxd.task(**tear_down_input) }.to raise_error(StandardError, %r{Unable to find}) + expect { lxd.task(**tear_down_input) }.to raise_error(RuntimeError, %r{Failed to lookup target #{container_id}}) end it 'expect to raise error if node_name not in inventory' do - expect { lxd.task(**tear_down_input) }.to raise_error(StandardError, %r{node_name #{container_id} not found in inventory}) + inventory.save + expect { lxd.task(**tear_down_input) }.to raise_error(RuntimeError, %r{Failed to lookup target #{container_id}}) end end end diff --git a/spec/tasks/provision_service_spec.rb b/spec/tasks/provision_service_spec.rb index 9e98d08e..33d9c460 100644 --- a/spec/tasks/provision_service_spec.rb +++ b/spec/tasks/provision_service_spec.rb @@ -65,7 +65,7 @@ end describe '#provision' do - let(:inventory_location) { "#{Dir.pwd}/litmus_inventory.yaml" } + let(:inventory) { InventoryHelper.open("#{Dir.pwd}/litmus_inventory.yaml") } let(:vars) { nil } let(:platform) { 'centos-8' } let(:retry_attempts) { 8 } @@ -94,7 +94,7 @@ }, ) .to_return(status: 200, body: '', headers: {}) - expect { provision_service.provision(platform, inventory_location, vars, retry_attempts) }.to raise_error(RuntimeError) + expect { provision_service.provision(platform, inventory, vars, retry_attempts) }.to raise_error(RuntimeError) end end @@ -112,7 +112,7 @@ }, ) .to_return(status: 200, body: '', headers: {}) - expect { provision_service.provision(platform, inventory_location, vars, retry_attempts) }.to raise_error(RuntimeError) + expect { provision_service.provision(platform, inventory, vars, retry_attempts) }.to raise_error(RuntimeError) stub_request(:post, 'https://facade-release-6f3kfepqcq-ew.a.run.app/v1/provision') .with( body: '{"url":"https://api.github.com/repos/puppetlabs/puppetlabs-iis/actions/runs/1234567890","VMs":[{"cloud":null,"region":null,"zone":null,"images":["centos-8"]}]}', @@ -126,11 +126,11 @@ ) .to_return(status: 200, body: response_body.to_json, headers: {}) allow(File).to receive(:open) - expect(provision_service.provision(platform, inventory_location, vars, retry_attempts)[:status]).to eq('ok') + expect(provision_service.provision(platform, inventory, vars, retry_attempts)[:status]).to eq('ok') end end - context 'when response is avlid' do + context 'when response is valid' do it 'return valid response' do stub_request(:post, 'https://facade-release-6f3kfepqcq-ew.a.run.app/v1/provision') .with( @@ -146,7 +146,7 @@ .to_return(status: 200, body: response_body.to_json, headers: {}) allow(File).to receive(:open) - expect(provision_service.provision(platform, inventory_location, vars, retry_attempts)[:status]).to eq('ok') + expect(provision_service.provision(platform, inventory, vars, 
retry_attempts)[:status]).to eq('ok') end end end diff --git a/spec/tasks/vagrant_spec.rb b/spec/tasks/vagrant_spec.rb index f339b5e9..c294c74e 100644 --- a/spec/tasks/vagrant_spec.rb +++ b/spec/tasks/vagrant_spec.rb @@ -7,20 +7,22 @@ let(:provider) { 'virtualbox' } let(:platform) { 'generic/debian10' } + include_context('with tmpdir') + before(:each) do # Stub $stdin.read to return a predefined JSON string allow($stdin).to receive(:read).and_return({ platform: platform, action: 'provision', vars: 'role: worker1', - inventory: Dir.pwd.to_s, + inventory: tmpdir, enable_synced_folder: 'true', provider: provider, hyperv_vswitch: 'hyperv_vswitch', hyperv_smb_username: 'hyperv_smb_username' }.to_json) allow(Open3).to receive(:capture3).with(%r{vagrant up --provider #{provider}}, any_args).and_return(['', '', 0]).once - allow(File).to receive(:read).with(%r{#{Dir.pwd}/spec/fixtures/.vagrant}).and_return('some_unique_id') + allow(File).to receive(:read).with(%r{#{tmpdir}.*\.vagrant}).and_return('some_unique_id') allow(Open3).to receive(:capture3).with(%r{vagrant ssh-config}, any_args).and_return(['', '', 0]).once allow(Net::SSH).to receive(:start).and_return(true) require_relative '../../tasks/vagrant' diff --git a/spec/unit/docker_helper_spec.rb b/spec/unit/docker_helper_spec.rb index 51a006e2..4736766b 100644 --- a/spec/unit/docker_helper_spec.rb +++ b/spec/unit/docker_helper_spec.rb @@ -81,19 +81,10 @@ end describe '.docker_tear_down' do - it 'expect to raise error if inventory file is not found' do - allow(File).to receive(:file?).and_return(false) - expect { docker_tear_down(container_id, inventory_location) }.to raise_error(RuntimeError, "Unable to find '#{inventory_location}/spec/fixtures/litmus_inventory.yaml'") - end - it 'expect to return status ok' do - allow(File).to receive(:file?).with(full_inventory_location).and_return(true) - allow(File).to receive(:exist?).with(full_inventory_location).and_return(true) - allow(File).to receive(:open).with(full_inventory_location, anything).and_yield(StringIO.new(inventory_yaml.dup)) allow(self).to receive(:run_local_command).with("docker rm -f #{container_id}") - allow(self).to receive(:remove_node).and_return(nil) expect { - expect(docker_tear_down(container_id, inventory_location)).to eql({ status: 'ok' }) + expect(docker_tear_down(container_id)).to eql({ status: 'ok' }) }.to output("Removed #{container_id}\n").to_stdout end end diff --git a/spec/unit/inventory_helper_spec.rb b/spec/unit/inventory_helper_spec.rb new file mode 100644 index 00000000..9e60af98 --- /dev/null +++ b/spec/unit/inventory_helper_spec.rb @@ -0,0 +1,79 @@ +# frozen_string_literal: true + +require 'spec_helper' +require 'inventory_helper' + +describe InventoryHelper, type: :class do + include_context('with tmpdir') + + let(:inventory_file) { tmpdir } + let(:inventory) { described_class.open(inventory_file) } + + let(:node_uri) { 'testing' } + let(:node_name) { node_uri } + let(:node_data) do + { + uri: node_uri, + name: node_name, + config: { + transport: 'whocares' + } + } + end + + describe '.open' do + it 'correctly opens and saves new inventory file' do + expect(inventory.save).to be_a described_class + end + + context 'non-existent inventory path' do + let(:inventory_file) { File.join(tmpdir, 'testing/testing.yaml') } + + it 'fails to open inventory file' do + expect { inventory.save }.to raise_error(RuntimeError, %r{directory for storing inventory does not exist}) + end + end + end + + describe '.lookup' do + let(:node_name) { 'somethingelse' } + + before(:each) do 
+ inventory.add(node_data, 'whocares').save + end + + it 'by uri' do + expect(inventory.lookup(node_uri)).to be_a Hash + end + + it 'by name' do + expect(inventory.lookup(name: node_name)).to be_a Hash + end + + it 'fallback to name' do + expect(inventory.lookup(node_name)).to be_a Hash + end + + it 'only in group' do + expect(inventory.lookup(node_uri, group: 'whocares')).to be_a Hash + end + + it 'not in group' do + expect { inventory.lookup(node_uri, group: 'nogroup') }.to raise_error(RuntimeError, "Failed to lookup target #{node_uri}") + end + end + + describe '.add' do + it 'add a node' do + expect(inventory.add(node_data, 'whocares').save).to be_a described_class + end + end + + describe '.remove' do + it 'remove a node' do + expect(inventory.add(node_data, 'whocares').save).to be_a described_class + expect(inventory.remove(inventory.lookup(node_uri))).to be_a described_class + expect { inventory.delete(inventory.lookup(node_uri)) }.to raise_error(RuntimeError, "Failed to lookup target #{node_uri}") + end + end +end diff --git a/tasks/abs.json b/tasks/abs.json index ff4b1dfb..0b209f48 100644 --- a/tasks/abs.json +++ b/tasks/abs.json @@ -26,6 +26,7 @@ } }, "files": [ - "provision/lib/task_helper.rb" + "provision/lib/task_helper.rb", + "provision/lib/inventory_helper.rb" ] } diff --git a/tasks/abs.rb b/tasks/abs.rb index 122e8955..513cc776 100755 --- a/tasks/abs.rb +++ b/tasks/abs.rb @@ -4,15 +4,13 @@ require 'json' require 'net/http' require 'yaml' -require 'puppet_litmus' require 'etc' require 'date' require_relative '../lib/task_helper' +require_relative '../lib/inventory_helper' # Provision and teardown vms through ABS. class ABSProvision - include PuppetLitmus::InventoryManipulation - # Enforces a k8s.infracore.puppet.net domain, but allows selection of prod, # stage, etc hostname from the environment variable +ABS_SUBDOMAIN+ so that # CI can test vms from staging. 
@@ -23,7 +21,7 @@ def abs_host "#{subdomain}.k8s.infracore.puppet.net" end - def provision(platform, inventory_location, vars) + def provision(platform, inventory, vars) uri = URI.parse("https://#{abs_host}/api/v2/request") jenkins_build_url = if ENV['CI'] == 'true' && ENV['TRAVIS'] == 'true' ENV.fetch('TRAVIS_JOB_WEB_URL', nil) @@ -85,7 +83,6 @@ def provision(platform, inventory_location, vars) raise "Timeout: unable to get a 200 response in #{poll_duration} seconds" if reply.code != '200' - inventory_hash = get_inventory_hash(inventory_location) data = JSON.parse(reply.body) data.each do |host| if platform_uses_ssh(host['type']) @@ -109,31 +106,27 @@ def provision(platform, inventory_location, vars) var_hash = YAML.safe_load(vars) node['vars'] = var_hash end - add_node_to_group(inventory_hash, node, group_name) + inventory.add(node, group_name) end - File.open(inventory_location, 'w') { |f| f.write inventory_hash.to_yaml } + inventory.save { status: 'ok', nodes: data.length } end - def tear_down(node_name, inventory_location) - if File.file?(inventory_location) - inventory_hash = inventory_hash_from_inventory_file(inventory_location) - facts = facts_from_node(inventory_hash, node_name) - platform = facts['platform'] - job_id = facts['job_id'] - end + def tear_down(node_name, inventory) + node = inventory.lookup(node_name, group: 'ssh_nodes') targets_to_remove = [] - inventory_hash['groups'].each do |group| - group['targets'].each do |node| - targets_to_remove.push(node['uri']) if node['facts']['job_id'] == job_id + inventory['groups'].each do |group| + group['targets'].each do |job_node| + targets_to_remove.push(job_node) if job_node['facts']['job_id'] == node['facts']['job_id'] end end + uri = URI.parse("https://#{abs_host}/api/v2/return") headers = { 'X-AUTH-TOKEN' => token_from_fogfile('abs'), 'Content-Type' => 'application/json' } - payload = { 'job_id' => job_id, - 'hosts' => [{ 'hostname' => node_name, 'type' => platform, 'engine' => 'vmpooler' }] } + payload = { 'job_id' => node['job_id'], + 'hosts' => [{ 'hostname' => node['uri'], 'type' => node['facts']['platform'], 'engine' => 'vmpooler' }] } http = Net::HTTP.new(uri.host, uri.port) http.use_ssl = true request = Net::HTTP::Post.new(uri.request_uri, headers) @@ -143,16 +136,17 @@ def tear_down(node_name, inventory_location) raise "Error: #{reply}: #{reply.message}" unless reply.code == '200' targets_to_remove.each do |target| - remove_node(inventory_hash, target) + inventory.remove(target) end - File.open(inventory_location, 'w') { |f| f.write inventory_hash.to_yaml } - { status: 'ok', removed: targets_to_remove } + inventory.save + + { status: 'ok', removed: targets_to_remove.map { |t| t['name'] || t['uri'] } } end def task(action:, platform: nil, node_name: nil, inventory: nil, vars: nil, **_kwargs) - inventory_location = sanitise_inventory_location(inventory) - result = provision(platform, inventory_location, vars) if action == 'provision' - result = tear_down(node_name, inventory_location) if action == 'tear_down' + inventory = InventoryHelper.open(inventory) + result = provision(platform, inventory, vars) if action == 'provision' + result = tear_down(node_name, inventory) if action == 'tear_down' result end diff --git a/tasks/docker.json b/tasks/docker.json index d14fe2c7..8f2df56b 100644 --- a/tasks/docker.json +++ b/tasks/docker.json @@ -27,6 +27,7 @@ }, "files": [ "provision/lib/task_helper.rb", - "provision/lib/docker_helper.rb" + "provision/lib/docker_helper.rb", + "provision/lib/inventory_helper.rb" ] } diff --git 
a/tasks/docker.rb b/tasks/docker.rb index b4c5e862..9053bdea 100755 --- a/tasks/docker.rb +++ b/tasks/docker.rb @@ -4,9 +4,9 @@ require 'json' require 'uri' require 'yaml' -require 'puppet_litmus' require_relative '../lib/task_helper' require_relative '../lib/docker_helper' +require_relative '../lib/inventory_helper' def install_ssh_components(distro, version, container) case distro @@ -116,9 +116,7 @@ def random_ssh_forwarding_port(start_port = 52_222, end_port = 52_999) random_ssh_forwarding_port(new_start_port, new_end_port) end -def provision(docker_platform, inventory_location, vars) - include PuppetLitmus::InventoryManipulation - inventory_hash = get_inventory_hash(inventory_location) +def provision(docker_platform, inventory, vars) os_release_facts = docker_image_os_release_facts(docker_platform) distro = os_release_facts['ID'] version = os_release_facts['VERSION_ID'] @@ -181,8 +179,7 @@ def provision(docker_platform, inventory_location, vars) inventory_node['name'] = container_id inventory_node['facts']['container_id'] = container_id - add_node_to_group(inventory_hash, inventory_node, 'ssh_nodes') - File.open(inventory_location, 'w') { |f| f.write inventory_hash.to_yaml } + inventory.add(inventory_node, 'ssh_nodes').save { status: 'ok', node_name: inventory_node['name'], node: inventory_node } end @@ -191,7 +188,7 @@ def provision(docker_platform, inventory_location, vars) platform = params['platform'] action = params['action'] node_name = params['node_name'] -inventory_location = sanitise_inventory_location(params['inventory']) +inventory = InventoryHelper.open(params['inventory']) vars = params['vars'] raise 'specify a node_name when tearing down' if action == 'tear_down' && node_name.nil? raise 'specify a platform when provisioning' if action == 'provision' && platform.nil? 
@@ -208,8 +205,12 @@ def provision(docker_platform, inventory_location, vars) end begin - result = provision(platform, inventory_location, vars) if action == 'provision' - result = docker_tear_down(node_name, inventory_location) if action == 'tear_down' + result = provision(platform, inventory, vars) if action == 'provision' + if action == 'tear_down' + node = inventory.lookup(name: node_name, group: 'ssh_nodes') + result = docker_tear_down(node['facts']['container_id']) + inventory.remove(node).save + end puts result.to_json exit 0 rescue StandardError => e diff --git a/tasks/docker_exp.json b/tasks/docker_exp.json index d14fe2c7..8f2df56b 100644 --- a/tasks/docker_exp.json +++ b/tasks/docker_exp.json @@ -27,6 +27,7 @@ }, "files": [ "provision/lib/task_helper.rb", - "provision/lib/docker_helper.rb" + "provision/lib/docker_helper.rb", + "provision/lib/inventory_helper.rb" ] } diff --git a/tasks/docker_exp.rb b/tasks/docker_exp.rb index f7888fc0..b651da9d 100755 --- a/tasks/docker_exp.rb +++ b/tasks/docker_exp.rb @@ -3,16 +3,14 @@ require 'json' require 'yaml' -require 'puppet_litmus' require_relative '../lib/task_helper' require_relative '../lib/docker_helper' +require_relative '../lib/inventory_helper' # TODO: detect what shell to use @shell_command = 'bash -lc' -def provision(docker_platform, inventory_location, vars) - include PuppetLitmus::InventoryManipulation - inventory_hash = get_inventory_hash(inventory_location) +def provision(docker_platform, inventory, vars) os_release_facts = docker_image_os_release_facts(docker_platform) inventory_node = { @@ -53,15 +51,14 @@ def provision(docker_platform, inventory_location, vars) inventory_node['uri'] = container_id inventory_node['facts']['container_id'] = container_id - add_node_to_group(inventory_hash, inventory_node, 'docker_nodes') - File.open(inventory_location, 'w') { |f| f.write inventory_hash.to_yaml } + inventory.add(inventory_node, 'docker_nodes').save { status: 'ok', node_name: inventory_node['name'], node: inventory_node } end params = JSON.parse($stdin.read) action = params['action'] -inventory_location = sanitise_inventory_location(params['inventory']) +inventory = InventoryHelper.open(params['inventory']) node_name = params['node_name'] platform = params['platform'] vars = params['vars'] @@ -80,11 +77,15 @@ def provision(docker_platform, inventory_location, vars) end begin - result = provision(platform, inventory_location, vars) if action == 'provision' - result = docker_tear_down(node_name, inventory_location) if action == 'tear_down' + result = provision(platform, inventory, vars) if action == 'provision' + if action == 'tear_down' + node = inventory.lookup(name: node_name, group: 'docker_nodes') + result = docker_tear_down(node['facts']['container_id']) + inventory.remove(node).save + end puts result.to_json exit 0 rescue StandardError => e - puts({ _error: { kind: 'provision/docker_exp_failure', msg: e.message } }.to_json) + puts({ _error: { kind: 'provision/docker_exp_failure', msg: e.message, backtrace: e.backtrace } }.to_json) exit 1 end diff --git a/tasks/lxd.json b/tasks/lxd.json index 15ea7e7c..590b95ef 100644 --- a/tasks/lxd.json +++ b/tasks/lxd.json @@ -51,6 +51,7 @@ } }, "files": [ - "provision/lib/task_helper.rb" + "provision/lib/task_helper.rb", + "provision/lib/inventory_helper.rb" ] } diff --git a/tasks/lxd.rb b/tasks/lxd.rb index 58325892..7689d8d7 100755 --- a/tasks/lxd.rb +++ b/tasks/lxd.rb @@ -3,15 +3,13 @@ require 'json' require 'yaml' -require 'puppet_litmus' require_relative '../lib/task_helper' 
+require_relative '../lib/inventory_helper' # Provision and teardown instances on LXD class LXDProvision - include PuppetLitmus::InventoryManipulation - attr_reader :node_name, :retries - attr_reader :platform, :inventory, :inventory_full_path, :vars, :action, :options + attr_reader :platform, :inventory, :vars, :action, :options def provision lxd_remote = options[:remote] || lxd_default_remote @@ -76,30 +74,23 @@ def provision node[:vars] = vars unless vars.nil? - add_node_to_group(inventory, node, 'lxd_nodes') - save_inventory + inventory.add(node, 'lxd_nodes').save { status: 'ok', node_name: container_id, node: node } end def tear_down - config = config_from_node(inventory, node_name) - node_facts = facts_from_node(inventory, node_name) + node = inventory.lookup(node_name, group: 'lxd_nodes') - raise "node_name #{node_name} not found in inventory" unless config + raise "node_name #{node_name} not found in inventory" unless node - run_local_command("lxc -q delete #{config['lxd']['remote']}:#{node_facts['container_id']} -f") + run_local_command("lxc -q delete #{node['config']['lxd']['remote']}:#{node['facts']['container_id']} -f") - remove_node(inventory, node_name) - save_inventory + inventory.remove(node).save { status: 'ok' } end - def save_inventory - File.write(inventory_full_path, JSON.parse(inventory.to_json).to_yaml) - end - def task(**params) finalize_params!(params) @@ -109,10 +100,7 @@ def task(**params) @node_name = params.delete(:node_name) @vars = YAML.safe_load(params.delete(:vars) || '~') - @inventory_full_path = sanitise_inventory_location(params.delete(:inventory)) - raise "Unable to find '#{@inventory_full_path}'" unless (action == 'provision') || File.file?(@inventory_full_path) - - @inventory = get_inventory_hash(@inventory_full_path) + @inventory = InventoryHelper.open(params.delete(:inventory)) @options = params.reject { |k, _v| k.start_with? '_' } method(action).call diff --git a/tasks/provision_service.json b/tasks/provision_service.json index 584b1824..d0c5c845 100644 --- a/tasks/provision_service.json +++ b/tasks/provision_service.json @@ -31,6 +31,7 @@ } }, "files": [ - "provision/lib/task_helper.rb" + "provision/lib/task_helper.rb", + "provision/lib/inventory_helper.rb" ] } diff --git a/tasks/provision_service.rb b/tasks/provision_service.rb index b5690dae..d7eea61f 100755 --- a/tasks/provision_service.rb +++ b/tasks/provision_service.rb @@ -4,16 +4,14 @@ require 'json' require 'net/http' require 'yaml' -require 'puppet_litmus' require 'etc' require_relative '../lib/task_helper' +require_relative '../lib/inventory_helper' # Provision and teardown vms through provision service. class ProvisionService RETRY_COUNT = 3 - include PuppetLitmus::InventoryManipulation - def default_uri 'https://facade-release-6f3kfepqcq-ew.a.run.app/v1/provision' end @@ -85,7 +83,7 @@ def invoke_cloud_request(params, uri, job_url, verb, retry_attempts) end end - def provision(platform, inventory_location, vars, retry_attempts) + def provision(platform, inventory, vars, retry_attempts) # Call the provision service with the information necessary and write the inventory file locally if ENV['GITHUB_RUN_ID'] @@ -119,27 +117,15 @@ def provision(platform, inventory_location, vars, retry_attempts) unless vars.nil? 
var_hash = YAML.safe_load(vars) - response_hash['groups'].each do |bg| - bg['targets'].each do |trgts| - trgts['vars'] = var_hash - end - end end - if File.file?(inventory_location) - inventory_hash = inventory_hash_from_inventory_file(inventory_location) - inventory_hash['groups'].each do |g| - response_hash['groups'].each do |bg| - g['targets'] = g['targets'] + bg['targets'] if g['name'] == bg['name'] - end - end - File.open(inventory_location, 'w') { |f| f.write inventory_hash.to_yaml } - else - FileUtils.mkdir_p(File.join(Dir.pwd, '/spec/fixtures')) - File.open(inventory_location, 'wb') do |f| - f.write(YAML.dump(response_hash)) + response_hash['groups'].each do |bg| + bg['targets'].each do |trgts| + trgts['vars'] = var_hash if var_hash + inventory.add(trgts, bg['name']) end end + inventory.save { status: 'ok', @@ -148,26 +134,22 @@ def provision(platform, inventory_location, vars, retry_attempts) } end - def tear_down(platform, inventory_location, _vars, retry_attempts) + def tear_down(node_name, inventory, _vars, retry_attempts) # remove all provisioned resources uri = URI.parse(ENV['SERVICE_URL'] || default_uri) - # rubocop:disable Style/GuardClause - if File.file?(inventory_location) - inventory_hash = inventory_hash_from_inventory_file(inventory_location) - facts = facts_from_node(inventory_hash, platform) - job_id = facts['uuid'] - response = invoke_cloud_request(job_id, uri, '', 'delete', retry_attempts) - response.to_json - end - # rubocop:enable Style/GuardClause + node = inventory.lookup(name: node_name) + facts = node['facts'] + job_id = facts['uuid'] + response = invoke_cloud_request(job_id, uri, '', 'delete', retry_attempts) + response.to_json end def self.run params = JSON.parse($stdin.read) params.transform_keys!(&:to_sym) action, node_name, platform, vars, retry_attempts, inventory_location = params.values_at(:action, :node_name, :platform, :vars, :retry_attempts, :inventory) - inventory_location = sanitise_inventory_location(inventory_location) + inventory = InventoryHelper.open(inventory_location) runner = new begin @@ -175,11 +157,11 @@ def self.run when 'provision' raise 'specify a platform when provisioning' if platform.to_s.empty? - result = runner.provision(platform, inventory_location, vars, retry_attempts) + result = runner.provision(platform, inventory, vars, retry_attempts) when 'tear_down' raise 'specify a node_name when tearing down' if node_name.nil? 
- result = runner.tear_down(node_name, inventory_location, vars, retry_attempts) + result = runner.tear_down(node_name, inventory, vars, retry_attempts) else result = { _error: { kind: 'provision_service/argument_error', msg: "Unknown action '#{action}'" } } end diff --git a/tasks/vagrant.json b/tasks/vagrant.json index ceb2d6ae..40e084a6 100644 --- a/tasks/vagrant.json +++ b/tasks/vagrant.json @@ -65,6 +65,7 @@ } }, "files": [ - "provision/lib/task_helper.rb" + "provision/lib/task_helper.rb", + "provision/lib/inventory_helper.rb" ] } diff --git a/tasks/vagrant.rb b/tasks/vagrant.rb index 566f9ba7..439dafc7 100755 --- a/tasks/vagrant.rb +++ b/tasks/vagrant.rb @@ -8,6 +8,7 @@ require 'fileutils' require 'net/ssh' require_relative '../lib/task_helper' +require_relative '../lib/inventory_helper' def vagrant_version return @vagrant_version if defined?(@vagrant_version) @@ -115,7 +116,7 @@ def configure_remoting(platform, remoting_config_path, password) remoting_config end -def provision(platform, inventory_location, enable_synced_folder, provider, cpus, memory, hyperv_vswitch, hyperv_smb_username, hyperv_smb_password, box_url, password, vars) +def provision(platform, inventory, enable_synced_folder, provider, cpus, memory, hyperv_vswitch, hyperv_smb_username, hyperv_smb_password, box_url, password, vars) if platform_is_windows?(platform) && !supports_windows_platform? raise "To provision a Windows VM with this task you must have vagrant 2.2.0 or later installed; vagrant seems to be installed at v#{vagrant_version}" end @@ -124,10 +125,8 @@ def provision(platform, inventory_location, enable_synced_folder, provider, cpus provider = on_windows? ? 'hyperv' : 'virtualbox' end - include PuppetLitmus - inventory_hash = get_inventory_hash(inventory_location) - vagrant_dirs = Dir.glob("#{File.join(File.dirname(inventory_location), '.vagrant')}/*/").map { |d| File.basename(d) } - @vagrant_env = File.expand_path(File.join(File.dirname(inventory_location), '.vagrant', get_vagrant_dir(platform, vagrant_dirs))) + vagrant_dirs = Dir.glob("#{File.join(File.dirname(inventory.location), '.vagrant')}/*/").map { |d| File.basename(d) } + @vagrant_env = File.expand_path(File.join(File.dirname(inventory.location), '.vagrant', get_vagrant_dir(platform, vagrant_dirs))) FileUtils.mkdir_p @vagrant_env generate_vagrantfile(File.join(@vagrant_env, 'Vagrantfile'), platform, enable_synced_folder, provider, cpus, memory, hyperv_vswitch, hyperv_smb_username, hyperv_smb_password, box_url) command = "vagrant up --provider #{provider}" @@ -140,6 +139,7 @@ def provision(platform, inventory_location, enable_synced_folder, provider, cpus if platform_uses_ssh(platform) node = { + 'name' => node_name, 'uri' => node_name, 'config' => { 'transport' => 'ssh', @@ -166,6 +166,7 @@ def provision(platform, inventory_location, enable_synced_folder, provider, cpus # TODO: Need to figure out where SSL comes from remote_config['uses_ssl'] ||= false # TODO: Is the default _actually_ false? 
node = { + 'name' => node_name, 'uri' => node_name, 'config' => { 'transport' => 'winrm', @@ -190,23 +191,17 @@ def provision(platform, inventory_location, enable_synced_folder, provider, cpus var_hash = YAML.safe_load(vars) node['vars'] = var_hash end - add_node_to_group(inventory_hash, node, group_name) - File.open(inventory_location, 'w') { |f| f.write inventory_hash.to_yaml } + inventory.add(node, group_name).save { status: 'ok', node_name: node_name, node: node } end -def tear_down(node_name, inventory_location) - include PuppetLitmus +def tear_down(node_name, inventory) command = 'vagrant destroy -f' - if File.file?(inventory_location) - inventory_hash = inventory_hash_from_inventory_file(inventory_location) - vagrant_env = facts_from_node(inventory_hash, node_name)['vagrant_env'] - run_local_command(command, vagrant_env) - remove_node(inventory_hash, node_name) - FileUtils.rm_r(vagrant_env) - end - warn "Removed #{node_name}" - File.open(inventory_location, 'w') { |f| f.write inventory_hash.to_yaml } + node = inventory.lookup(name: node_name, group: 'ssh_nodes') + vagrant_env = node['facts']['vagrant_env'] + run_local_command(command, vagrant_env) + FileUtils.rm_r(vagrant_env) + inventory.remote(node).save { status: 'ok' } end @@ -216,7 +211,7 @@ def tear_down(node_name, inventory_location) action = params['action'] node_name = params['node_name'] vars = params['vars'] -inventory_location = sanitise_inventory_location(params['inventory']) +inventory = InventoryHelper.open(params['inventory']) enable_synced_folder = params['enable_synced_folder'].nil? ? ENV.fetch('VAGRANT_ENABLE_SYNCED_FOLDER', nil) : params['enable_synced_folder'] enable_synced_folder = enable_synced_folder.casecmp('true').zero? if enable_synced_folder.is_a?(String) provider = params['provider'].nil? ? ENV.fetch('VAGRANT_PROVIDER', nil) : params['provider'] @@ -242,11 +237,8 @@ def tear_down(node_name, inventory_location) end begin - if action == 'provision' - result = provision(platform, inventory_location, enable_synced_folder, provider, cpus, memory, hyperv_vswitch, hyperv_smb_username, hyperv_smb_password, box_url, password, -vars) - end - result = tear_down(node_name, inventory_location) if action == 'tear_down' + result = provision(platform, inventory, enable_synced_folder, provider, cpus, memory, hyperv_vswitch, hyperv_smb_username, hyperv_smb_password, box_url, password, vars) if action == 'provision' + result = tear_down(node_name, inventory) if action == 'tear_down' puts result.to_json exit 0 rescue StandardError => e From f677336c78ef2ec5a1b0b78d79d1f90296d80e54 Mon Sep 17 00:00:00 2001 From: Jeffrey Clark Date: Tue, 9 Jul 2024 03:05:54 -0500 Subject: [PATCH 2/2] (maint) fix spec tests when run sequentially running `rake spec` (sequential tests) or `pdk test unit -v` resulted in none of the tests in spec/unit being run. 
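To make the fix concrete: the task's top-level code is wrapped in a `vagrant` method
that is only invoked when the file is executed directly, so a spec can
`require_relative` the task without it reading $stdin and exiting. A minimal sketch
of the guard pattern (illustrative, not the full task):

```ruby
require 'json'

# Wrapping the side effects in a method means `require_relative '../../tasks/vagrant'`
# in a spec only defines methods; nothing runs at load time.
def vagrant
  params = JSON.parse($stdin.read)   # task input is read only when run as a script
  warn params
  puts({ status: 'ok' }.to_json)     # stand-in for the provision/tear_down dispatch
end

# Skipped when the file is merely required, executed when run directly.
vagrant if __FILE__ == $PROGRAM_NAME
```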
--- spec/tasks/vagrant_spec.rb | 8 ++-- tasks/vagrant.rb | 78 ++++++++++++++++++++------------------ 2 files changed, 45 insertions(+), 41 deletions(-) diff --git a/spec/tasks/vagrant_spec.rb b/spec/tasks/vagrant_spec.rb index c294c74e..d768c192 100644 --- a/spec/tasks/vagrant_spec.rb +++ b/spec/tasks/vagrant_spec.rb @@ -2,6 +2,7 @@ require 'rspec' require 'spec_helper' require 'net/ssh' +require_relative '../../tasks/vagrant' describe 'vagrant' do let(:provider) { 'virtualbox' } @@ -25,12 +26,11 @@ allow(File).to receive(:read).with(%r{#{tmpdir}.*\.vagrant}).and_return('some_unique_id') allow(Open3).to receive(:capture3).with(%r{vagrant ssh-config}, any_args).and_return(['', '', 0]).once allow(Net::SSH).to receive(:start).and_return(true) - require_relative '../../tasks/vagrant' end it 'provisions a new vagrant box when action is provision' do - expect { vagrant }.to output(%r{"status":"ok"}).to_stdout - expect { vagrant }.to output(%r{"platform":"generic/debian10"}).to_stdout - expect { vagrant }.to output(%r{"role":"worker1"}).to_stdout + expect { vagrant }.to raise_error(SystemExit).and output( + include('"status":"ok"', '"platform":"generic/debian10"', '"role":"worker1"'), + ).to_stdout end end diff --git a/tasks/vagrant.rb b/tasks/vagrant.rb index 439dafc7..bdff1de9 100755 --- a/tasks/vagrant.rb +++ b/tasks/vagrant.rb @@ -205,43 +205,47 @@ def tear_down(node_name, inventory) { status: 'ok' } end -params = JSON.parse($stdin.read) -warn params -platform = params['platform'] -action = params['action'] -node_name = params['node_name'] -vars = params['vars'] -inventory = InventoryHelper.open(params['inventory']) -enable_synced_folder = params['enable_synced_folder'].nil? ? ENV.fetch('VAGRANT_ENABLE_SYNCED_FOLDER', nil) : params['enable_synced_folder'] -enable_synced_folder = enable_synced_folder.casecmp('true').zero? if enable_synced_folder.is_a?(String) -provider = params['provider'].nil? ? ENV.fetch('VAGRANT_PROVIDER', nil) : params['provider'] -cpus = params['cpus'].nil? ? ENV.fetch('VAGRANT_CPUS', nil) : params['cpus'] -memory = params['memory'].nil? ? ENV.fetch('VAGRANT_MEMORY', nil) : params['memory'] -hyperv_vswitch = params['hyperv_vswitch'].nil? ? ENV.fetch('VAGRANT_HYPERV_VSWITCH', nil) : params['hyperv_vswitch'] -hyperv_smb_username = params['hyperv_smb_username'].nil? ? ENV.fetch('VAGRANT_HYPERV_SMB_USERNAME', nil) : params['hyperv_smb_username'] -hyperv_smb_password = params['hyperv_smb_password'].nil? ? ENV.fetch('VAGRANT_HYPERV_SMB_PASSWORD', nil) : params['hyperv_smb_password'] -box_url = params['box_url'].nil? ? ENV.fetch('VAGRANT_BOX_URL', nil) : params['box_url'] -password = params['password'].nil? ? ENV.fetch('VAGRANT_PASSWORD', nil) : params['password'] -raise 'specify a node_name when tearing down' if action == 'tear_down' && node_name.nil? -raise 'specify a platform when provisioning' if action == 'provision' && platform.nil? - -unless node_name.nil? ^ platform.nil? - case action - when 'tear_down' - raise 'specify only a node_name, not platform, when tearing down' - when 'provision' - raise 'specify only a platform, not node_name, when provisioning' - else - raise 'specify only one of: node_name, platform' +def vagrant + params = JSON.parse($stdin.read) + warn params + platform = params['platform'] + action = params['action'] + node_name = params['node_name'] + vars = params['vars'] + inventory = InventoryHelper.open(params['inventory']) + enable_synced_folder = params['enable_synced_folder'].nil? ? 
ENV.fetch('VAGRANT_ENABLE_SYNCED_FOLDER', nil) : params['enable_synced_folder'] + enable_synced_folder = enable_synced_folder.casecmp('true').zero? if enable_synced_folder.is_a?(String) + provider = params['provider'].nil? ? ENV.fetch('VAGRANT_PROVIDER', nil) : params['provider'] + cpus = params['cpus'].nil? ? ENV.fetch('VAGRANT_CPUS', nil) : params['cpus'] + memory = params['memory'].nil? ? ENV.fetch('VAGRANT_MEMORY', nil) : params['memory'] + hyperv_vswitch = params['hyperv_vswitch'].nil? ? ENV.fetch('VAGRANT_HYPERV_VSWITCH', nil) : params['hyperv_vswitch'] + hyperv_smb_username = params['hyperv_smb_username'].nil? ? ENV.fetch('VAGRANT_HYPERV_SMB_USERNAME', nil) : params['hyperv_smb_username'] + hyperv_smb_password = params['hyperv_smb_password'].nil? ? ENV.fetch('VAGRANT_HYPERV_SMB_PASSWORD', nil) : params['hyperv_smb_password'] + box_url = params['box_url'].nil? ? ENV.fetch('VAGRANT_BOX_URL', nil) : params['box_url'] + password = params['password'].nil? ? ENV.fetch('VAGRANT_PASSWORD', nil) : params['password'] + raise 'specify a node_name when tearing down' if action == 'tear_down' && node_name.nil? + raise 'specify a platform when provisioning' if action == 'provision' && platform.nil? + + unless node_name.nil? ^ platform.nil? + case action + when 'tear_down' + raise 'specify only a node_name, not platform, when tearing down' + when 'provision' + raise 'specify only a platform, not node_name, when provisioning' + else + raise 'specify only one of: node_name, platform' + end end -end -begin - result = provision(platform, inventory, enable_synced_folder, provider, cpus, memory, hyperv_vswitch, hyperv_smb_username, hyperv_smb_password, box_url, password, vars) if action == 'provision' - result = tear_down(node_name, inventory) if action == 'tear_down' - puts result.to_json - exit 0 -rescue StandardError => e - puts({ _error: { kind: 'provision/vagrant_failure', msg: e.message } }.to_json) - exit 1 + begin + result = provision(platform, inventory, enable_synced_folder, provider, cpus, memory, hyperv_vswitch, hyperv_smb_username, hyperv_smb_password, box_url, password, vars) if action == 'provision' + result = tear_down(node_name, inventory) if action == 'tear_down' + puts result.to_json + exit 0 + rescue StandardError => e + puts({ _error: { kind: 'provision/vagrant_failure', msg: e.message } }.to_json) + exit 1 + end end + +vagrant if __FILE__ == $PROGRAM_NAME
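
A minimal usage sketch of the `InventoryHelper` API introduced in patch 1/2; the
path and node fields below are illustrative, not part of the change:

```ruby
require_relative 'lib/inventory_helper'

# Opening a file path (or a directory, which resolves to <dir>/inventory.yaml)
# loads the YAML inventory if present, or starts an empty version-2 structure.
inventory = InventoryHelper.open('/tmp/inventory.yaml')   # illustrative location

node = {
  'uri' => 'localhost:2222',                               # illustrative target
  'config' => { 'transport' => 'ssh' },
  'facts' => { 'provisioner' => 'docker', 'container_id' => 'abc123' }
}

inventory.add(node, 'ssh_nodes').save                            # add to a group, write YAML
target = inventory.lookup('localhost:2222', group: 'ssh_nodes')  # raises if not found
inventory.remove(target).save                                    # drop the target, write YAML
```

This mirrors how the reworked tasks use it: provision adds a target and saves,
tear_down looks the target up, removes the external resource, then removes the
target from the inventory and saves.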