Compare commits: master...stable-web (3 commits)

Author | SHA1 | Date
---|---|---
RJ Spiker | fd66ec3081 |
Chris Roberts | 52e3036af9 |
Chris Roberts | 583d6d5af2 |
.ci/build.sh (20)

@@ -1,20 +0,0 @@
#!/usr/bin/env bash

csource="${BASH_SOURCE[0]}"
while [ -h "$csource" ] ; do csource="$(readlink "$csource")"; done
root="$( cd -P "$( dirname "$csource" )/../" && pwd )"

. "${root}/.ci/init.sh"

pushd "${root}" > "${output}"

# Build our gem
wrap gem build *.gemspec \
"Failed to build Vagrant RubyGem"

# Get the path of our new gem
g=(vagrant*.gem)
gem=$(printf "%s" "${g}")

wrap aws s3 cp "${gem}" "${ASSETS_PRIVATE_BUCKET}/${repository}/vagrant-master.gem" \
"Failed to store Vagrant RubyGem master build"
.ci/common.sh (432)

@@ -1,432 +0,0 @@
# last-modified: Tue Jan 14 20:37:58 UTC 2020
#!/usr/bin/env bash

# Path to file used for output redirect
# and extracting messages for warning and
# failure information sent to slack
function output_file() {
printf "/tmp/.ci-output"
}

# Write failure message, send error to configured
# slack, and exit with non-zero status. If an
# "$(output_file)" file exists, the last 5 lines will be
# included in the slack message.
#
# $1: Failure message
function fail() {
(>&2 echo "ERROR: ${1}")
if [ -f ""$(output_file)"" ]; then
slack -s error -m "ERROR: ${1}" -f "$(output_file)" -T 5
else
slack -s error -m "ERROR: ${1}"
fi
exit 1
}

# Write warning message, send warning to configured
# slack
#
# $1: Warning message
function warn() {
(>&2 echo "WARN: ${1}")
if [ -f ""$(output_file)"" ]; then
slack -s warn -m "WARNING: ${1}" -f "$(output_file)"
else
slack -s warn -m "WARNING: ${1}"
fi
}

# Execute command while redirecting all output to
# a file (file is used within fail mesage on when
# command is unsuccessful). Final argument is the
# error message used when the command fails.
#
# $@{1:$#-1}: Command to execute
# $@{$#}: Failure message
function wrap() {
i=$(("${#}" - 1))
wrap_raw "${@:1:$i}"
if [ $? -ne 0 ]; then
cat "$(output_file)"
fail "${@:$#}"
fi
rm "$(output_file)"
}

# Execute command while redirecting all output to
# a file. Exit status is returned.
function wrap_raw() {
rm -f "$(output_file)"
"${@}" > "$(output_file)" 2>&1
return $?
}

# Execute command while redirecting all output to
# a file (file is used within fail mesage on when
# command is unsuccessful). Command output will be
# streamed during execution. Final argument is the
# error message used when the command fails.
#
# $@{1:$#-1}: Command to execute
# $@{$#}: Failure message
function wrap_stream() {
i=$(("${#}" - 1))
wrap_stream_raw "${@:1:$i}"
if [ $? -ne 0 ]; then
fail "${@:$#}"
fi
rm "$(output_file)"
}

# Execute command while redirecting all output
# to a file. Command output will be streamed
# during execution. Exit status is returned
function wrap_stream_raw() {
rm -f "$(output_file)"
"${@}" > "$(output_file)" 2>&1 &
pid=$!
until [ -f "$(output_file)" ]; do
sleep 0.1
done
tail -f --quiet --pid "${pid}" "$(output_file)"
wait "${pid}"
return $?
}


# Send command to packet device and wrap
# execution
# $@{1:$#-1}: Command to execute
# $@{$#}: Failure message
function pkt_wrap() {
wrap packet-exec run -quiet -- "${@}"
}

# Send command to packet device and wrap
# execution
# $@: Command to execute
function pkt_wrap_raw() {
wrap_raw packet-exec run -quiet -- "${@}"
}

# Send command to packet device and wrap
# execution with output streaming
# $@{1:$#-1}: Command to execute
# $@{$#}: Failure message
function pkt_wrap_stream() {
wrap_stream packet-exec run -quiet -- "${@}"
}

# Send command to packet device and wrap
# execution with output streaming
# $@: Command to execute
function pkt_wrap_stream_raw() {
wrap_stream_raw packet-exec run -quiet -- "${@}"
}

# Generates location within the asset storage
# bucket to retain built assets.
function asset_location() {
if [ "${tag}" = "" ]; then
dst="${ASSETS_PRIVATE_LONGTERM}/${repository}/${ident_ref}/${short_sha}"
else
if [[ "${tag}" = *"+"* ]]; then
dst="${ASSETS_PRIVATE_LONGTERM}/${repository}/${tag}"
else
dst="${ASSETS_PRIVATE_BUCKET}/${repository}/${tag}"
fi
fi
echo -n "${dst}"
}

# Upload assets to the asset storage bucket.
#
# $1: Path to asset file or directory to upload
function upload_assets() {
if [ "${1}" = "" ]; then
fail "Parameter required for asset upload"
fi
if [ -d "${1}" ]; then
wrap aws s3 cp --recursive "${1}" "$(asset_location)/" \
"Upload to asset storage failed"
else
wrap aws s3 cp "${1}" "$(asset_location)/" \
"Upload to asset storage failed"
fi
}

# Download assets from the asset storage bucket. If
# destination is not provided, remote path will be
# used locally.
#
# $1: Path to asset or directory to download
# $2: Optional destination for downloaded assets
function download_assets() {
if [ "${1}" = "" ]; then
fail "At least one parameter required for asset download"
fi
if [ "${2}" = "" ]; then
dst="${1#/}"
else
dst="${2}"
fi
mkdir -p "${dst}"
src="$(asset_location)/${1#/}"
remote=$(aws s3 ls "${src}")
if [[ "${remote}" = *" PRE "* ]]; then
mkdir -p "${dst}"
wrap aws s3 cp --recursive "${src%/}/" "${dst}" \
"Download from asset storage failed"
else
mkdir -p "$(dirname "${dst}")"
wrap aws s3 cp "${src}" "${dst}" \
"Download from asset storage failed"
fi
}

# Upload assets to the cache storage bucket.
#
# $1: Path to asset file or directory to upload
function upload_cache() {
if [ "${1}" = "" ]; then
fail "Parameter required for cache upload"
fi
if [ -d "${1}" ]; then
wrap aws s3 cp --recursive "${1}" "${asset_cache}/" \
"Upload to cache failed"
else
wrap aws s3 cp "${1}" "${asset_cache}/" \
"Upload to cache failed"
fi
}

# Download assets from the cache storage bucket. If
# destination is not provided, remote path will be
# used locally.
#
# $1: Path to asset or directory to download
# $2: Optional destination for downloaded assets
function download_cache() {
if [ "${1}" = "" ]; then
fail "At least one parameter required for cache download"
fi
if [ "${2}" = "" ]; then
dst="${1#/}"
else
dst="${2}"
fi
mkdir -p "${dst}"
src="${asset_cache}/${1#/}"
remote=$(aws s3 ls "${src}")
if [[ "${remote}" = *" PRE "* ]]; then
mkdir -p "${dst}"
wrap aws s3 cp --recursive "${src%/}/" "${dst}" \
"Download from cache storage failed"
else
mkdir -p "$(dirname "${dst}")"
wrap aws s3 cp "${src}" "${dst}" \
"Download from cache storage failed"
fi
}

# Validate arguments for GitHub release. Checks for
# two arguments and that second argument is an exiting
# file asset, or directory.
#
# $1: GitHub tag name
# $2: Asset file or directory of assets
function release_validate() {
if [ "${1}" = "" ]; then
fail "Missing required position 1 argument (TAG) for release"
fi
if [ "${2}" = "" ]; then
fail "Missing required position 2 argument (PATH) for release"
fi
if [ ! -e "${2}" ]; then
fail "Path provided for release (${2}) does not exist"
fi
}

# Generate a GitHub release
#
# $1: GitHub tag name
# $2: Asset file or directory of assets
function release() {
release_validate "${@}"
wrap_raw ghr -u "${repo_owner}" -r "${repo_name}" -c "${full_sha}" -n "${1}" -delete
if [ $? -ne 0 ]; then
wrap ghr -u "${repo_owner}" -r "${repo_name}" -c "${full_sha}" -n "${1}" \
"${1}" "${2}" "Failed to create release for version ${1}"
fi
}

# Generate a GitHub prerelease
#
# $1: GitHub tag name
# $2: Asset file or directory of assets
function prerelease() {
release_validate "${@}"
if [[ "${1}" != *"+"* ]]; then
ptag="${1}+${short_sha}"
else
ptag="${1}"
fi

wrap_raw ghr -u "${repo_owner}" -r "${repo_name}" -c "${full_sha}" -n "${ptag}" \
-delete -prerelease "${ptag}" "${2}"
if [ $? -ne 0 ]; then
wrap ghr -u "${repo_owner}" -r "${repo_name}" -c "${full_sha}" -n "${ptag}" \
-prerelease "${ptag}" "${2}" \
"Failed to create prerelease for version ${1}"
fi
echo -n "${ptag}"
}

# Check if version string is valid for release
#
# $1: Version
# Returns: 0 if valid, 1 if invalid
function valid_release_version() {
if [[ "${1}" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
return 0
else
return 1
fi
}

# Validate arguments for HashiCorp release. Ensures asset
# directory exists, and checks that the SHASUMS and SHASUM.sig
# files are present.
#
# $1: Asset directory
function hashicorp_release_validate() {
directory="${1}"

# Directory checks
if [ "${directory}" = "" ]; then
fail "No asset directory was provided for HashiCorp release"
fi
if [ ! -d "${directory}" ]; then
fail "Asset directory for HashiCorp release does not exist"
fi

# SHASUMS checks
if [ ! -e "${directory}/"*SHASUMS ]; then
fail "Asset directory is missing SHASUMS file"
fi
if [ ! -e "${directory}/"*SHASUMS.sig ]; then
fail "Asset directory is missing SHASUMS signature file"
fi
}

# Verify release assets by validating checksum properly match
# and that signature file is valid
#
# $1: Asset directory
function hashicorp_release_verify() {
directory="${1}"
pushd "${directory}" > "${output}"

# First do a checksum validation
wrap shasum -a 256 -c *_SHA256SUMS \
"Checksum validation of release assets failed"
# Next check that the signature is valid
gpghome=$(mktemp -qd)
export GNUPGHOME="${gpghome}"
wrap gpg --import "${HASHICORP_PUBLIC_GPG_KEY}" \
"Failed to import HashiCorp public GPG key"
wrap gpg --verify *SHA256SUMS.sig *SHA256SUMS \
"Validation of SHA256SUMS signature failed"
rm -rf "${gpghome}" > "${output}" 2>&1
popd > "${output}"
}

# Generate a HashiCorp release
#
# $1: Asset directory
function hashicorp_release() {
directory="${1}"

hashicorp_release_validate "${directory}"
hashicorp_release_verify "${directory}"

oid="${AWS_ACCESS_KEY_ID}"
okey="${AWS_SECRET_ACCESS_KEY}"
export AWS_ACCESS_KEY_ID="${RELEASE_AWS_ACCESS_KEY_ID}"
export AWS_SECRET_ACCESS_KEY="${RELEASE_AWS_SECRET_ACCESS_KEY}"

wrap_stream hc-releases upload "${directory}" \
"Failed to upload HashiCorp release assets"
wrap_stream hc-releases publish \
"Failed to publish HashiCorp release"

export AWS_ACCESS_KEY_ID="${oid}"
export AWS_SECRET_ACCESS_KEY="${okey}"
}

# Configures git for hashibot usage
function hashibot_git() {
wrap git config user.name "${HASHIBOT_USERNAME}" \
"Failed to setup git for hashibot usage (username)"
wrap git config user.email "${HASHIBOT_EMAIL}" \
"Failed to setup git for hashibot usage (email)"
wrap git remote set-url origin "https://${HASHIBOT_USERNAME}:${HASHIBOT_TOKEN}@github.com/${repository}" \
"Failed to setup git for hashibot usage (remote)"
}

# Stub cleanup method which can be redefined
# within actual script
function cleanup() {
(>&2 echo "** No cleanup tasks defined")
}

trap cleanup EXIT

# Enable debugging. This needs to be enabled with
# extreme caution when used on public repositories.
# Output with debugging enabled will likely include
# secret values which should not be publicly exposed.
#
# If repository is public, FORCE_PUBLIC_DEBUG environment
# variable must also be set.

is_private=$(curl -H "Authorization: token ${HASHIBOT_TOKEN}" -s "https://api.github.com/repos/${GITHUB_REPOSITORY}" | jq .private)

if [ "${DEBUG}" != "" ]; then
if [ "${is_private}" = "false" ]; then
if [ "${FORCE_PUBLIC_DEBUG}" != "" ]; then
set -x
output="/dev/stdout"
else
fail "Cannot enable debug mode on public repository unless forced"
fi
else
set -x
output="/dev/stdout"
fi
else
output="/dev/null"
fi

# Check if we are running a public repository on private runners
if [ "${VAGRANT_PRIVATE}" != "" ] && [ "${is_private}" = "false" ]; then
fail "Cannot run public repositories on private Vagrant runners. Disable runners now!"
fi

# Common variables
full_sha="${GITHUB_SHA}"
short_sha="${full_sha:0:8}"
ident_ref="${GITHUB_REF#*/*/}"
if [[ "${GITHUB_REF}" == *"refs/tags/"* ]]; then
tag="${GITHUB_REF##*tags/}"
valid_release_version "${tag}"
if [ $? -eq 0 ]; then
release=1
fi
fi
repository="${GITHUB_REPOSITORY}"
repo_owner="${repository%/*}"
repo_name="${repository#*/}"
asset_cache="${ASSETS_PRIVATE_SHORTTERM}/${repository}/${GITHUB_ACTION}"
job_id="${GITHUB_ACTION}"
@@ -1,6 +0,0 @@
#!/usr/bin/env bash

. "${root}/.ci/common.sh"

export DEBIAN_FRONTEND="noninteractive"
export PATH="${PATH}:${root}/.ci"
@@ -1,62 +0,0 @@
#!/usr/bin/env bash

ghr_version="0.13.0"

# NOTE: This release will generate a new release on the installers
# repository which in turn triggers a full package build
target_owner="hashicorp"
target_repository="vagrant-builders"

csource="${BASH_SOURCE[0]}"
while [ -h "$csource" ] ; do csource="$(readlink "$csource")"; done
root="$( cd -P "$( dirname "$csource" )/../" && pwd )"

. "${root}/.ci/init.sh"

pushd "${root}" > "${output}"

# Install ghr
wrap curl -Lso /tmp/ghr.tgz "https://github.com/tcnksm/ghr/releases/download/v${ghr_version}/ghr_v${ghr_version}_linux_amd64.tar.gz" \
"Failed to download ghr utility"
wrap tar -C /tmp/ -xf /tmp/ghr.tgz \
"Failed to unpack ghr archive"
wrap mv "/tmp/ghr_v${ghr_version}_linux_amd64/ghr" "${root}/.ci/" \
"Failed to install ghr utility"

# Build our gem
wrap gem build *.gemspec \
"Failed to build Vagrant RubyGem"

# Get the path of our new gem
g=(vagrant*.gem)
gem=$(printf "%s" "${g}")

# Determine the version of the release
vagrant_version="$(gem specification "${gem}" version)"
vagrant_version="${vagrant_version##*version: }"

# We want to release into the builders repository so
# update the repository variable with the desired destination
repo_owner="${target_owner}"
repo_name="${target_repository}"
full_sha="master"

export GITHUB_TOKEN="${HASHIBOT_TOKEN}"

if [ "${tag}" = "" ]; then
echo "Generating Vagrant RubyGem pre-release... "
version="v${vagrant_version}+${short_sha}"
prerelease "${version}" "${gem}"
else
# Validate this is a proper release version
valid_release_version "${vagrant_version}"
if [ $? -ne 0 ]; then
fail "Invalid version format for Vagrant release: ${vagrant_version}"
fi

echo "Generating Vagrant RubyGem release... "
version="v${vagrant_version}"
release "${version}" "${gem}"
fi

slack -m "New Vagrant installers release triggered: *${version}*"
.ci/slack (176)

@@ -1,176 +0,0 @@
#!/usr/bin/env ruby

require "optparse"
require "net/https"
require "uri"
require "json"

OPTIONS = [:channel, :username, :icon, :state, :message,
:message_file, :file, :title, :tail, :webhook].freeze

options = {}

OptionParser.new do |opts|
opts.banner = "Usage: #{File.basename(__FILE__)} [options]"

opts.on("-c", "--channel CHAN", "Send to channel") do |c|
options[:channel] = c
end

opts.on("-u", "--username USER", "Send as username") do |u|
options[:username] = u
end

opts.on("-i", "--icon URL", "User icon image") do |i|
options[:icon] = i
end

opts.on("-s", "--state STATE", "Message state (success, warn, error, or color code)") do |s|
options[:state] = s
end

opts.on("-m", "--message MESSAGE", "Message to send") do |m|
options[:message] = m
end

opts.on("-M", "--message-file MESSAGE_FILE", "Use file contents as message") do |m|
options[:message_file] = m
end

opts.on("-f", "--file MESSAGE_FILE", "Send raw contents of file in message") do |f|
options[:file] = f
end

opts.on("-t", "--title TITLE", "Message title") do |t|
options[:title] = t
end

opts.on("-T", "--tail N", "Send last N lines of content from raw message file") do |t|
options[:tail] = t
end

opts.on("-w", "--webhook HOOK", "Slack webhook") do |w|
options[:webhook] = w
end

opts.on("-h", "--help", "Print help") do
puts opts
exit
end
end.parse!

OPTIONS.each do |key|
if !options.key?(key)
env_key = "SLACK_#{key.to_s.upcase}"
if ENV[env_key]
options[key] = ENV[env_key]
end
end
end

if !options[:webhook]
$stderr.puts "ERROR: Webhook is required!"
exit 1
end

if ENV["CIRCLECI"]
options[:icon] = "https://emoji.slack-edge.com/TF1GCKJNM/circleci/054b58d488e65138.png" unless options[:icon]
options[:username] = "circleci" unless options[:username]
options[:footer] = "CircleCI - <#{ENV["CIRCLE_BUILD_URL"]}|#{ENV["CIRCLE_PROJECT_USERNAME"]}/#{ENV["CIRCLE_PROJECT_REPONAME"]}>"
options[:footer_icon] = "https://emoji.slack-edge.com/TF1GCKJNM/circleci/054b58d488e65138.png"
end

if ENV["GITHUB_ACTIONS"]
options[:icon] = "https://ca.slack-edge.com/T024UT03C-WG8NDATGT-f82ae03b9fca-48" unless options[:icon]
options[:username] = "github" unless options[:username]
options[:footer] = "Actions - <https://github.com/#{ENV["GITHUB_REPOSITORY"]}/commit/#{ENV["GITHUB_SHA"]}/checks|#{ENV["GITHUB_REPOSITORY"]}>"
options[:footer_icon] = "https://ca.slack-edge.com/T024UT03C-WG8NDATGT-f82ae03b9fca-48"
end

options[:state] = "success" unless options[:state]

case options[:state]
when "success", "good"
options[:state] = "good"
when "warn", "warning"
options[:state] = "warning"
when "error", "danger"
options[:state] = "danger"
else
if !options[:state].start_with?("#")
$stderr.puts "ERROR: Invalid value for `state` (#{options[:state]})"
exit 1
end
end

msg = options[:message]

# NOTE: Message provided from CLI argument will end up with
# double escaped newlines so remove one
msg.gsub!("\\n", "\n") if msg

if options[:message_file]
if !File.exist?(options[:message_file])
$stderr.puts "ERROR: Message file does not exist `#{options[:message_file]}`"
exit 1
end
msg_c = File.read(options[:message_file])
msg = msg ? "#{msg}\n\n#{msg_c}" : msg_c
end

if options[:file]
if !File.exist?(options[:file])
$stderr.puts "ERROR: Message file does not exist `#{options[:file]}`"
exit 1
end
if (tail = options[:tail].to_i) > 0
content = ""
buffer = 0
File.open(options[:file], "r") do |f|
until (content.split("\n").size > tail) || buffer >= f.size
buffer += 1000
buffer = f.size if buffer > f.size
f.seek(f.size - buffer)
content = f.read
end
end
parts = content.split("\n")
if parts.size > tail
parts = parts.slice(-tail, tail)
end
fmsg = parts ? parts.join("\n") : ""
else
fmsg = File.read(options[:file])
end
fmsg = "```\n#{fmsg}\n```"
if msg
msg = msg << "\n\n" << fmsg
end
end

if msg.to_s.empty?
$stderr.puts "ERROR: No message content provided!"
exit 1
end

attach = {text: msg, fallback: msg, color: options[:state], mrkdn: true}
attach[:title] = options[:title] if options[:title]
attach[:footer] = options[:footer] if options[:footer]
attach[:footer_icon] = options[:footer_icon] if options[:footer_icon]
attach[:ts] = Time.now.to_i

payload = {}.tap do |pd|
pd[:username] = options.fetch(:username, "packet-exec")
pd[:channel] = options[:channel] if options[:channel]
pd[:icon_url] = options[:icon] if options[:icon]
pd[:attachments] = [attach]
end

result = Net::HTTP.post(URI(options[:webhook]), payload.to_json, "Content-Type" => "application/json")

if !result.code.start_with?("2")
$stderr.puts "Failed to send slack message"
exit 1
else
$stdout.puts "ok"
end
.ci/test.sh (27)

@@ -1,27 +0,0 @@
#!/usr/bin/env bash

csource="${BASH_SOURCE[0]}"
while [ -h "$csource" ] ; do csource="$(readlink "$csource")"; done
root="$( cd -P "$( dirname "$csource" )/../" && pwd )"

pushd "${root}" > /dev/null

export DEBIAN_FRONTEND="noninteractive"

# Install required dependencies
sudo apt-get update || exit 1
sudo apt-get install -yq bsdtar || exit 1

# Ensure bundler is installed
gem install --no-document bundler || exit 1

# Install the bundle
bundle install || exit 1

# Run tests
bundle exec rake test:unit

result=$?
popd > /dev/null

exit $result
@@ -1,6 +1,127 @@
version: 2

reference:
  environment: &ENVIRONMENT
    SLACK_TITLE: Vagrant CI
    RELEASE_TARGET_REPONAME: vagrant-installers
  images:
    ruby24: &ruby24
      docker:
        - image: circleci/ruby:2.4
    ruby25: &ruby25
      docker:
        - image: circleci/ruby:2.5
    ruby26: &ruby26
      docker:
        - image: circleci/ruby:2.6
    builder: &builder
      environment:
        <<: *ENVIRONMENT
      docker:
        - image: $BUILDER_IMAGE
          auth:
            username: $BUILDER_USERNAME
            password: $BUILDER_PASSWORD
  workflows:
    public: &PUBLIC_WORKFLOW
      filters:
        branches:
          only: /^pull\/.*/
    master: &MASTER_WORKFLOW
      filters:
        branches:
          only: master
    private_build: &PRIVATE_WORKFLOW_BUILD
      context: vagrant
      filters:
        branches:
          only:
            - /^build-.*/
        tags:
          only: /.*/
  jobs:
    private_failure: &PRIVATE_FAILURE
      run:
        name: Failure handler
        command: |
          if [ -f .output ]; then
            slack -m "Vagrant job has failed: *${CIRCLE_JOB}*" -s error -f .output -T 5
          else
            slack -m "Vagrant job has failed: *${CIRCLE_JOB}*" -s error
          fi
        when: on_fail
    unit_tests: &unit_tests
      steps:
        - run: sudo apt-get update ; sudo apt-get -yq install bsdtar
        - checkout
        - restore_cache:
            key: static-site-gems-v1-{{ checksum "Gemfile.lock" }}
        - run:
            command: bundle check || bundle install --path vendor/bundle
        - save_cache:
            key: static-site-gems-v1-{{ checksum "Gemfile.lock" }}
            paths:
              - ./vendor/bundle
        - run: bundle exec rake test:unit
jobs:
  build:
    <<: *builder
    steps:
      - checkout
      - run: gem build vagrant.gemspec
      - *PRIVATE_FAILURE
      - persist_to_workspace:
          root: .
          paths:
            - ./*.gem
  store:
    <<: *builder
    steps:
      - attach_workspace:
          at: .
      - run: |
          gem_name=(vagrant-*.gem)
          if [ "${CIRCLE_TAG}" == "" ]; then
            remote_gem_name="vagrant-master.gem"
          else
            remote_gem_name="vagrant.gem"
          fi
          if [[ "${CIRCLE_BRANCH}" = "build-"* ]]; then
            s3_dst="${ASSETS_PRIVATE_LONGTERM}/${CIRCLE_PROJECT_USERNAME}/${CIRCLE_PROJECT_REPONAME}/${CIRCLE_BRANCH##build-}/"
          else
            s3_dst="${ASSETS_PRIVATE_BUCKET}/${CIRCLE_PROJECT_USERNAME}/${CIRCLE_PROJECT_REPONAME}/"
          fi
          aws s3 cp "${gem_name}" "${s3_dst}${remote_gem_name}" > .output 2>&1
      - *PRIVATE_FAILURE
  release:
    <<: *builder
    steps:
      - checkout
      - attach_workspace:
          at: .
      - run: |
          set +e
          gem=(vagrant-*.gem)
          gem_version="${gem##vagrant-}"
          gem_version="${gem_version%%.gem}"
          export GITHUB_TOKEN="${HASHIBOT_TOKEN}"
          if [ "${CIRCLE_TAG}" = "" ]; then
            version="v${gem_version}+$(git rev-parse --short "${CIRCLE_SHA1}")"
            ghr -u ${CIRCLE_PROJECT_USERNAME} -r ${RELEASE_TARGET_REPONAME} -c master -prerelease -delete -replace ${version} ${gem} > .output 2>&1
          else
            version="${CIRCLE_TAG}"
            ghr -u ${CIRCLE_PROJECT_USERNAME} -r ${RELEASE_TARGET_REPONAME} -c master -delete -replace ${version} ${gem} > .output 2>&1
          fi
          slack -m "New Vagrant installers release triggered: *${version}*"
      - *PRIVATE_FAILURE
  test_ruby24:
    <<: *ruby24
    <<: *unit_tests
  test_ruby25:
    <<: *ruby25
    <<: *unit_tests
  test_ruby26:
    <<: *ruby26
    <<: *unit_tests
  build-website:
    # setting the working_directory along with the checkout path allows us to not have
    # to cd into the website/ directory for commands

@@ -24,6 +145,46 @@ jobs:
          command: ./scripts/deploy.sh
workflows:
  version: 2
  builds:
    jobs:
      - build:
          <<: *PRIVATE_WORKFLOW_BUILD
      - store:
          <<: *PRIVATE_WORKFLOW_BUILD
          requires:
            - build
      - release:
          <<: *PRIVATE_WORKFLOW_BUILD
          requires:
            - build
  pull_requests:
    jobs:
      - test_ruby24:
          <<: *PUBLIC_WORKFLOW
      - test_ruby25:
          <<: *PUBLIC_WORKFLOW
      - test_ruby26:
          <<: *PUBLIC_WORKFLOW
  master:
    jobs:
      - test_ruby24:
          <<: *MASTER_WORKFLOW
      - test_ruby25:
          <<: *MASTER_WORKFLOW
      - test_ruby26:
          <<: *MASTER_WORKFLOW
      - build:
          <<: *MASTER_WORKFLOW
          context: vagrant
          requires:
            - test_ruby24
            - test_ruby25
            - test_ruby26
      - store:
          <<: *MASTER_WORKFLOW
          context: vagrant
          requires:
            - build
  website:
    jobs:
      - build-website:
@@ -1,37 +0,0 @@
on:
  push:
    branches:
      - master
    paths-ignore:
      - 'CHANGELOG.md'
      - 'website/**'

jobs:
  build-gem:
    name: Build Vagrant RubyGem
    runs-on: ubuntu-18.04
    steps:
      - name: Code Checkout
        uses: actions/checkout@v1
      - name: Set Ruby
        uses: actions/setup-ruby@v1
        with:
          ruby-version: '2.6'
      - name: Build RubyGem
        run: ./.ci/build.sh
        working-directory: ${{github.workspace}}
        env:
          ASSETS_LONGTERM_PREFIX: elt
          ASSETS_PRIVATE_BUCKET: ${{ secrets.ASSETS_PRIVATE_BUCKET }}
          ASSETS_PRIVATE_LONGTERM: ${{ secrets.ASSETS_PRIVATE_LONGTERM }}
          ASSETS_PRIVATE_SHORTTERM: ${{ secrets.ASSETS_PRIVATE_SHORTTERM }}
          ASSETS_PUBLIC_BUCKET: ${{ secrets.ASSETS_PUBLIC_BUCKET }}
          ASSETS_PUBLIC_LONGTERM: ${{ secrets.ASSETS_PUBLIC_LONGTERM }}
          ASSETS_PUBLIC_SHORTTERM: ${{ secrets.ASSETS_PUBLIC_SHORTTERM }}
          ASSETS_SHORTTERM_PREFIX: est
          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          HASHIBOT_EMAIL: ${{ secrets.HASHIBOT_EMAIL }}
          HASHIBOT_TOKEN: ${{ secrets.HASHIBOT_TOKEN }}
          HASHIBOT_USERNAME: ${{ secrets.HASHIBOT_USERNAME }}
          SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
@@ -1,38 +0,0 @@
on:
  push:
    branches:
      - 'build-*'
    tags: '*'
    paths-ignore:
      - 'CHANGELOG.md'
      - 'website/**'

jobs:
  trigger-release:
    name: Trigger Installers Build
    runs-on: ubuntu-18.04
    steps:
      - name: Code Checkout
        uses: actions/checkout@v1
      - name: Set Ruby
        uses: actions/setup-ruby@v1
        with:
          ruby-version: '2.6'
      - name: Create Builders Release
        run: ./.ci/release.sh
        working-directory: ${{github.workspace}}
        env:
          ASSETS_LONGTERM_PREFIX: elt
          ASSETS_PRIVATE_BUCKET: est
          ASSETS_PRIVATE_LONGTERM: ${{ secrets.ASSETS_PRIVATE_LONGTERM }}
          ASSETS_PRIVATE_SHORTTERM: ${{ secrets.ASSETS_PRIVATE_SHORTTERM }}
          ASSETS_PUBLIC_BUCKET: ${{ secrets.ASSETS_PUBLIC_BUCKET }}
          ASSETS_PUBLIC_LONGTERM: ${{ secrets.ASSETS_PUBLIC_LONGTERM }}
          ASSETS_PUBLIC_SHORTTERM: ${{ secrets.ASSETS_PUBLIC_SHORTTERM }}
          ASSETS_SHORTTERM_PREFIX: ${{ secrets.ASSETS_SHORTTERM_PREFIX }}
          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          HASHIBOT_EMAIL: ${{ secrets.HASHIBOT_EMAIL }}
          HASHIBOT_TOKEN: ${{ secrets.HASHIBOT_TOKEN }}
          HASHIBOT_USERNAME: ${{ secrets.HASHIBOT_USERNAME }}
          SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
@@ -1,29 +0,0 @@
on:
  push:
    branches:
      - master
      - 'test-*'
    paths-ignore:
      - 'CHANGELOG.md'
      - 'website/**'
  pull_request:
    branches:
      - master

jobs:
  unit-tests:
    runs-on: ubuntu-18.04
    strategy:
      matrix:
        ruby: [ '2.4.x', '2.5.x', '2.6.x' ]
    name: Vagrant unit tests on Ruby ${{ matrix.ruby }}
    steps:
      - name: Code Checkout
        uses: actions/checkout@v1
      - name: Setup Ruby
        uses: actions/setup-ruby@v1
        with:
          ruby-version: ${{matrix.ruby}}
          architecture: 'x64'
      - name: Run Tests
        run: .ci/test.sh
@@ -1,12 +0,0 @@
poll "closed_issue_locker" "locker" {
schedule = "0 50 1 * * *"
closed_for = "720h" # 30 days
max_issues = 500
sleep_between_issues = "5s"

message = <<-EOF
I'm going to lock this issue because it has been closed for _30 days_ ⏳. This helps our maintainers find and focus on the active issues.

If you have found a problem that seems similar to this, please open a new issue and complete the issue template so we can capture all the details necessary to investigate further.
EOF
}
CHANGELOG.md (47)

@@ -1,46 +1,3 @@
## Next version (Unreleased)

FEATURES:

IMPROVEMENTS:

BUG FIXES:

## 2.2.7 (January 27, 2020)

IMPROVEMENTS:

- guest/opensuse: Check for basename hostname prior to setting hostname [GH-11170]
- host/linux: Check for modinfo in /sbin if it's not on PATH [GH-11178]
- core: Show guest name in hostname error message [GH-11175]
- provisioners/shell: Linux guests now support `reboot` option [GH-11194]
- darwin/nfs: Put each NFS export on its own line [GH-11216]
- contrib/bash: Add more completion flags to up command [GH-11223]
- provider/virtualbox: Add VirtualBox provider support for version 6.1.x [GH-11250]
- box/outdated: Allow to force check for box updates and ignore cached check [GH-11231]
- guest/alpine: Update apk cache when installing rsync [GH-11220]
- provider/virtualbox: Improve error message when machine folder is inaccessible [GH-11239]
- provisioner/ansible_local: Add pip install method for arch guests [GH-11265]
- communicators/winssh: Use Windows shell for `vagrant ssh -c` [GH-11258]

BUG FIXES:

- command/snapshot/save: Fix regression that prevented snapshot of all guests in environment [GH-11152]
- core: Update UI to properly retain newlines when adding prefix [GH-11126]
- core: Check if box update is available locally [GH-11188]
- core: Ensure Vagrant::Errors are loaded in file_checksum util [GH-11183]
- cloud/publish: Improve argument handling for missing arguments to command [GH-11184]
- core: Get latest version for current provider during outdated check [GH-11192]
- linux/nfs: avoid adding extra newlines to /etc/exports [GH-11201]
- guest/darwin: Fix VMware synced folders on APFS [GH-11267]
- guest/redhat: Ensure `nfs-server` is restarted when installing nfs client [GH-11212]
- core: Do not validate checksums if options are empty string [GH-11211]
- provider/docker: Enhance docker build method to match against buildkit output [GH-11205]
- provisioner/ansible_local: Don't prompt for input when installing Ansible on Ubuntu and Debian [GH-11191]
- provisioner/ansible_local: Ensure all guest caps accept all passed in arguments [GH-11265]
- host/windows: Fix regression that prevented port collisions from being detected [GH-11244]
- core/provisioner: Set top level provisioner name if set in a provisioner config [GH-11295]

## 2.2.6 (October 14, 2019)

FEATURES:

@@ -56,7 +13,7 @@ IMPROVEMENTS:
- guest/alt: Improve handling for using network tools when setting hostname [GH-11000]
- guest/suse: Add ipv6 network config templates for SUSE based distributions [GH-11013]
- guest/windows: Retry on connection timeout errors for the reboot capability [GH-11093]
- host/bsd: Use host resolve path capability to modify local paths if required [GH-11108]
- host/bsd: Use host resolve path capability to modify local paths if requird [GH-11108]
- host/darwin: Add host resolve path capability to provide real paths for firmlinks [GH-11108]
- provisioners/chef: Update pkg install flags for chef on FreeBSD guests [GH-11075]
- provider/hyperv: Improve error message when VMMS is not running [GH-10978]

@@ -552,7 +509,7 @@ BUG FIXES:
- core: Rescue more exceptions when checking if port is open [GH-8517]
- guests/solaris11: Inherit from Solaris guest and keep solaris11 specific methods [GH-9034]
- guests/windows: Split out cygwin path helper for msys2/cygwin paths and ensure cygpath exists [GH-8972]
- guests/windows: Specify expected shell when executing on guest (fixes einssh communicator usage) [GH-9012]
- guests/windows: Specify expected shell when executing on guest (fixes winssh communicator usage) [GH-9012]
- guests/windows: Include WinSSH Communicator when using insert_public_key [GH-9105]
- hosts/windows: Check for vagrant.exe when validating versions within WSL [GH-9107, GH-8962]
- providers/docker: Isolate windows check within executor to handle running through VM [GH-8921]
@@ -75,19 +75,7 @@ _vagrant() {
then
local vm_list=$(find "${vagrant_state_file}/machines" -mindepth 1 -maxdepth 1 -type d -exec basename {} \;)
fi
local up_commands="\
--provision \
--no-provision \
--provision-with \
--destroy-on-error \
--no-destroy-on-error \
--parallel \
--no-parallel
--provider \
--install-provider \
--no-install-provider \
-h \
--help"
local up_commands="--no-provision"
COMPREPLY=($(compgen -W "${up_commands} ${vm_list}" -- ${cur}))
return 0
;;
@@ -15,7 +15,6 @@ module Vagrant
autoload :Confirm, "vagrant/action/builtin/confirm"
autoload :ConfigValidate, "vagrant/action/builtin/config_validate"
autoload :DestroyConfirm, "vagrant/action/builtin/destroy_confirm"
autoload :Disk, "vagrant/action/builtin/disk"
autoload :EnvSet, "vagrant/action/builtin/env_set"
autoload :GracefulHalt, "vagrant/action/builtin/graceful_halt"
autoload :HandleBox, "vagrant/action/builtin/handle_box"
@@ -348,15 +348,9 @@ module Vagrant
end

if opts[:checksum] && opts[:checksum_type]
if opts[:checksum].to_s.strip.empty?
@logger.warn("Given checksum is empty, cannot validate checksum for box")
elsif opts[:checksum_type].to_s.strip.empty?
@logger.warn("Given checksum type is empty, cannot validate checksum for box")
else
env[:ui].detail(I18n.t("vagrant.actions.box.add.checksumming"))
validate_checksum(
opts[:checksum_type], opts[:checksum], box_url)
end
env[:ui].detail(I18n.t("vagrant.actions.box.add.checksumming"))
validate_checksum(
opts[:checksum_type], opts[:checksum], box_url)
end

# Add the box!
@@ -40,7 +40,7 @@ module Vagrant
# Have download options specified in the environment override
# options specified for the machine.
download_options = {
automatic_check: !env[:box_outdated_force],
automatic_check: true,
ca_cert: env[:ca_cert] || machine.config.vm.box_download_ca_cert,
ca_path: env[:ca_path] || machine.config.vm.box_download_ca_path,
client_cert: env[:client_cert] ||

@@ -70,23 +70,15 @@ module Vagrant
message: e.message))
end
env[:box_outdated] = update != nil
local_update = check_outdated_local(env)
if update && (local_update.nil? || (local_update.version < update[1].version))
if update
env[:ui].warn(I18n.t(
"vagrant.box_outdated_single",
name: update[0].name,
provider: box.provider,
current: box.version,
latest: update[1].version))
elsif local_update
env[:ui].warn(I18n.t(
"vagrant.box_outdated_local",
name: local_update.name,
old: box.version,
new: local_update.version))
env[:box_outdated] = true
else
env[:box_outdated] = false
check_outdated_local(env)
end

@app.call(env)

@@ -101,8 +93,19 @@ module Vagrant
version ||= ""
version += "> #{machine.box.version}"

env[:box_collection].find(
box = env[:box_collection].find(
machine.box.name, machine.box.provider, version)
if box
env[:ui].warn(I18n.t(
"vagrant.box_outdated_local",
name: box.name,
old: machine.box.version,
new: box.version))
env[:box_outdated] = true
return
end

env[:box_outdated] = false
end
end
end
@@ -1,39 +0,0 @@
module Vagrant
module Action
module Builtin
class Disk
def initialize(app, env)
@app = app
@logger = Log4r::Logger.new("vagrant::action::builtin::disk")
end

def call(env)
machine = env[:machine]
defined_disks = get_disks(machine, env)

# Call into providers machine implementation for disk management
if !defined_disks.empty?
if machine.provider.capability?(:configure_disks)
machine.provider.capability(:configure_disks, defined_disks)
else
env[:ui].warn(I18n.t("vagrant.actions.disk.provider_unsupported",
provider: machine.provider_name))
end
end

# Continue On
@app.call(env)
end

def get_disks(machine, env)
return @_disks if @_disks

@_disks = []
@_disks = machine.config.vm.disks

@_disks
end
end
end
end
end
@@ -25,27 +25,9 @@ module Vagrant
# Store in the type map so that --provision-with works properly
@_provisioner_types[result] = provisioner.type

# Set top level provisioner name to provisioner configs name if top level name not set.
# This is mostly for handling the shell provisioner, if a user has set its name like:
#
# config.vm.provision "shell", name: "my_provisioner"
#
# Where `name` is a shell config option, not a top level provisioner class option
#
# Note: `name` is set to a symbol, since it is converted to one via #Config::VM.provision
provisioner_name = provisioner.name
if !provisioner_name
if provisioner.config.respond_to?(:name) &&
provisioner.config.name
provisioner_name = provisioner.config.name.to_sym
end
else
provisioner_name = provisioner_name.to_sym
end

# Build up the options
options = {
name: provisioner_name,
name: provisioner.name,
run: provisioner.run,
before: provisioner.before,
after: provisioner.after,
@@ -36,35 +36,17 @@ module Vagrant

# Get the command and wrap it in a login shell
command = ShellQuote.escape(env[:ssh_run_command], "'")

if env[:machine].config.vm.communicator == :winssh
shell = env[:machine].config.winssh.shell
else
shell = env[:machine].config.ssh.shell
end

if shell == "cmd"
# Add an extra space to the command so cmd.exe quoting works
# properly
command = "#{shell} /C #{command} "
elsif shell == "powershell"
command = "$ProgressPreference = \"SilentlyContinue\"; #{command}"
command = Base64.strict_encode64(command.encode("UTF-16LE", "UTF-8"))
command = "#{shell} -encodedCommand #{command}"
else
command = "#{shell} -c '#{command}'"
end
command = "#{env[:machine].config.ssh.shell} -c '#{command}'"

# Execute!
opts = env[:ssh_opts] || {}
opts[:extra_args] ||= []

# Allow the user to specify a tty or non-tty manually, but if they
# don't then we default to a TTY unless they are using WinSSH
# don't then we default to a TTY
if !opts[:extra_args].include?("-t") &&
!opts[:extra_args].include?("-T") &&
env[:tty] &&
env[:machine].config.vm.communicator != :winssh
env[:tty]
opts[:extra_args] << "-t"
end
@@ -47,16 +47,7 @@ module Vagrant
raise Errors::VagrantInterrupt if env[:interrupted]
action = @actions.shift
@logger.info("Calling IN action: #{action}")

if !action.is_a?(Proc) && env[:hook]
hook_name = action.class.name.split("::").last.
gsub(/([a-z])([A-Z])/, '\1_\2').gsub('-', '_').downcase
end

env[:hook].call("before_#{hook_name}".to_sym) if hook_name
@stack.unshift(action).first.call(env)
env[:hook].call("after_#{hook_name}".to_sym) if hook_name

raise Errors::VagrantInterrupt if env[:interrupted]
@logger.info("Calling OUT action: #{action}")
rescue SystemExit
@@ -68,25 +68,11 @@ module Vagrant

# Returns all the versions supported by this metadata. These
# versions are sorted so the last element of the list is the
# latest version. Optionally filter versions by a matching
# provider.
# latest version.
#
# @return[Array<String>]
def versions(**opts)
provider = nil
provider = opts[:provider].to_sym if opts[:provider]

if provider
@version_map.select do |version, raw|
if raw["providers"]
raw["providers"].detect do |p|
p["name"].to_sym == provider
end
end
end.keys.sort.map(&:to_s)
else
@version_map.keys.sort.map(&:to_s)
end
def versions
@version_map.keys.sort.map(&:to_s)
end

# Represents a single version within the metadata.
@@ -436,10 +436,6 @@ module Vagrant
error_key(:machine_action_locked)
end

class MachineFolderNotAccessible < VagrantError
error_key(:machine_folder_not_accessible)
end

class MachineGuestNotReady < VagrantError
error_key(:machine_guest_not_ready)
end
@@ -329,15 +329,10 @@ module Vagrant
target = opts[:target] if opts.key?(:target)
target = "#{target}:" if target != ""

# Get the lines. The first default is because if the message
# is an empty string, then we want to still use the empty string.
lines = [message]
if message != ""
lines = [].tap do |l|
message.scan(/(.*?)(\n|$)/).each do |m|
l << m.first if m.first != "" || (m.first == "" && m.last == "\n")
end
end
lines << "" if message.end_with?("\n")
end
lines = message.split("\n") if message != ""

# Otherwise, make sure to prefix every line properly
lines.map do |line|
@@ -2,9 +2,6 @@
# passed into FileChecksum. Note that this isn't strictly enforced at
# the moment, and this class isn't directly used. It is merely here for
# documentation of structure of the class.

require "vagrant/errors"

class DigestClass
def update(string); end
def hexdigest; end

@@ -65,9 +62,8 @@ class FileChecksum
def load_digest(type)
digest = CHECKSUM_MAP[type.to_s.to_sym]
if digest.nil?
raise Vagrant::Errors::BoxChecksumInvalidType,
type: type.to_s,
types: CHECKSUM_MAP.keys.join(', ')
raise Errors::BoxChecksumInvalidType,
type: type.to_s
end
digest
end
@@ -30,7 +30,8 @@ module Vagrant
return true
end
rescue Timeout::Error, Errno::ECONNREFUSED, Errno::EHOSTUNREACH, \
Errno::ENETUNREACH, Errno::EACCES, Errno::ENOTCONN
Errno::ENETUNREACH, Errno::EACCES, Errno::ENOTCONN, \
Errno::EADDRNOTAVAIL
# Any of the above exceptions signal that the port is closed.
return false
end
@@ -1,61 +0,0 @@
require "log4r"

module Vagrant
module Util
class Numeric

# Authors Note: This conversion has been borrowed from the ActiveSupport Numeric class
# Conversion helper constants
KILOBYTE = 1024
MEGABYTE = KILOBYTE * 1024
GIGABYTE = MEGABYTE * 1024
TERABYTE = GIGABYTE * 1024
PETABYTE = TERABYTE * 1024
EXABYTE = PETABYTE * 1024

BYTES_CONVERSION_MAP = {KB: KILOBYTE, MB: MEGABYTE, GB: GIGABYTE, TB: TERABYTE,
PB: PETABYTE, EB: EXABYTE}

# Regex borrowed from the vagrant-disksize config class
SHORTHAND_MATCH_REGEX = /^(?<number>[0-9]+)\s?(?<unit>KB|MB|GB|TB)?$/

class << self
LOGGER = Log4r::Logger.new("vagrant::util::numeric")

# A helper that converts a shortcut string to its bytes representation.
# The expected format of `str` is essentially: "<Number>XX"
# Where `XX` is shorthand for KB, MB, GB, TB, PB, or EB. For example, 50 megabytes:
#
# str = "50MB"
#
# @param [String] - str
# @return [Integer,nil] - bytes - returns nil if method fails to convert to bytes
def string_to_bytes(str)
bytes = nil

str = str.to_s.strip
matches = SHORTHAND_MATCH_REGEX.match(str)
if matches
number = matches[:number].to_i
unit = matches[:unit].to_sym

if BYTES_CONVERSION_MAP.key?(unit)
bytes = number * BYTES_CONVERSION_MAP[unit]
else
LOGGER.error("An invalid unit or format was given, string_to_bytes cannot convert #{str}")
end
end

bytes
end

# @private
# Reset the cached values for platform. This is not considered a public
# API and should only be used for testing.
def reset!
instance_variables.each(&method(:remove_instance_variable))
end
end
end
end
end
@@ -26,10 +26,6 @@ module VagrantPlugins
options[:global] = g
end

o.on("-f", "--force", "Force checks for latest box updates") do |f|
options[:force] = f
end

build_download_options(o, download_options)
end

@@ -44,7 +40,7 @@ module VagrantPlugins

with_target_vms(argv) do |machine|
@env.action_runner.run(Vagrant::Action.action_box_outdated, {
box_outdated_force: options[:force],
box_outdated_force: true,
box_outdated_refresh: true,
box_outdated_success_ui: true,
machine: machine,

@@ -77,15 +73,7 @@ module VagrantPlugins
end

current = Gem::Version.new(box.version)
box_versions = md.versions(provider: box.provider)

if box_versions.empty?
latest_box_version = box_versions.last.to_i
else
latest_box_version = box_versions.last
end

latest = Gem::Version.new(latest_box_version)
latest = Gem::Version.new(md.versions.last)
if latest <= current
@env.ui.success(I18n.t(
"vagrant.box_up_to_date",
@@ -144,7 +144,7 @@ en:
unauthorized: |-
  Invalid username or password. Please try again.
unexpected_error: |-
  An unexpected error occurred: %{error}
  An unexpected error occured: %{error}

check_logged_in: |-
  You are already logged in.
@@ -55,7 +55,7 @@ module VagrantPlugins
argv = parse_options(opts)
return if !argv

if argv.empty? || argv.length > 4 || argv.length < 3 || (argv.length == 3 && !options[:url])
if argv.empty? || argv.length > 4 || argv.length < 3
raise Vagrant::Errors::CLIInvalidUsage,
help: opts.help.chomp
end
@@ -15,9 +15,6 @@ module VagrantPlugins
o.separator "can be restored via `vagrant snapshot restore` at any point in the"
o.separator "future to get back to this exact machine state."
o.separator ""
o.separator "If no vm-name is given, Vagrant will take a snapshot of"
o.separator "the entire environment with the same snapshot name."
o.separator ""
o.separator "Snapshots are useful for experimenting in a machine and being able"
o.separator "to rollback quickly."

@@ -34,22 +31,20 @@ module VagrantPlugins
help: opts.help.chomp
end

name = argv.pop
# If no snapshot name is given, the backup name is the same as the machine name.
# If there is a name given, we need to remove it and save it as `name`. Otherwise
# `with_target_vms` will treat the snapshot name as a guest name.
if argv.size < 2
name = argv.first
else
name = argv.pop
end

with_target_vms(argv) do |vm|
if !vm.provider.capability?(:snapshot_list)
raise Vagrant::Errors::SnapshotNotSupported
end

# In this case, no vm name was given, and we are iterating over the
# entire environment. If a vm hasn't been created yet, we can't list
# its snapshots
if vm.id.nil?
@env.ui.warn(I18n.t("vagrant.commands.snapshot.save.vm_not_created",
name: vm.name))
next
end

snapshot_list = vm.provider.capability(:snapshot_list)

if !snapshot_list.include? name
@@ -8,7 +8,7 @@ module VagrantPlugins

def self.rsync_install(machine)
machine.communicate.tap do |comm|
comm.sudo('apk add --update-cache rsync')
comm.sudo('apk add rsync')
end
end
end
@@ -45,22 +45,6 @@ module VagrantPlugins
require_relative 'cap/smb'
Cap::SMB
end

def self.check_community_plugin
plugins = Vagrant::Plugin::Manager.instance.installed_plugins
if plugins.keys.include?("vagrant-alpine")
$stderr.puts <<-EOF
WARNING: Vagrant has detected the `vagrant-alpine` plugin. This plugin's
functionality has been merged into the main Vagrant project and should be
considered deprecated. To uninstall the plugin, run the command shown below:

vagrant plugin uninstall vagrant-alpine

EOF
end
end

self.check_community_plugin
end
end
end
@ -1,5 +1,3 @@
|
|||
require "securerandom"
|
||||
|
||||
module VagrantPlugins
|
||||
module GuestDarwin
|
||||
module Cap
|
||||
|
@@ -7,102 +5,31 @@ module VagrantPlugins
# we seem to be unable to ask 'mount -t vmhgfs' to mount the roots
# of specific shares, so instead we symlink from what is already
# mounted by the guest tools
# (ie. the behaviour of the VMware_fusion provider prior to 0.8.x)

def self.mount_vmware_shared_folder(machine, name, guestpath, options)
# Use this variable to determine which machines
# have been registered with after hook
@apply_firmlinks ||= Hash.new{ |h, k| h[k] = {bootstrap: false, content: []} }

machine.communicate.tap do |comm|
# check if we are dealing with an APFS root container
if comm.test("test -d /System/Volumes/Data")
parts = Pathname.new(guestpath).descend.to_a
firmlink = parts[1].to_s
firmlink.slice!(0, 1) if firmlink.start_with?("/")
if parts.size > 2
guestpath = File.join("/System/Volumes/Data", guestpath)
else
guestpath = nil
end
# clear prior symlink
if comm.test("test -L \"#{guestpath}\"", sudo: true)
comm.sudo("rm -f \"#{guestpath}\"")
end

# Remove existing symlink or directory if defined
if guestpath
if comm.test("test -L \"#{guestpath}\"")
comm.sudo("rm -f \"#{guestpath}\"")
elsif comm.test("test -d \"#{guestpath}\"")
comm.sudo("rm -Rf \"#{guestpath}\"")
end

# create intermediate directories if needed
intermediate_dir = File.dirname(guestpath)
if intermediate_dir != "/"
comm.sudo("mkdir -p \"#{intermediate_dir}\"")
end

comm.sudo("ln -s \"/Volumes/VMware Shared Folders/#{name}\" \"#{guestpath}\"")
# clear prior directory if exists
if comm.test("test -d \"#{guestpath}\"", sudo: true)
comm.sudo("rm -Rf \"#{guestpath}\"")
end

if firmlink && !system_firmlink?(firmlink)
if guestpath.nil?
guestpath = "/Volumes/VMware Shared Folders/#{name}"
else
guestpath = File.join("/System/Volumes/Data", firmlink)
end

share_line = "#{firmlink}\t#{guestpath}"

# Check if the line is already defined. If so, bail since we are done
if !comm.test("[[ \"$(</etc/synthetic.conf)\" = *\"#{share_line}\"* ]]")
@apply_firmlinks[machine.id][:bootstrap] = true
end

# If we haven't already added our hook to apply firmlinks, do it now
if @apply_firmlinks[machine.id][:content].empty?
Plugin.action_hook(:apfs_firmlinks, :after_synced_folders) do |hook|
action = proc { |*_|
content = @apply_firmlinks[machine.id][:content].join("\n")
# Write out the synthetic file
comm.sudo("echo -e #{content.inspect} > /etc/synthetic.conf")
if @apply_firmlinks[:bootstrap]
# Re-bootstrap the root container to pick up firmlink updates
comm.sudo("/System/Library/Filesystems/apfs.fs/Contents/Resources/apfs.util -B")
end
}
hook.prepend(action)
end
end
@apply_firmlinks[machine.id][:content] << share_line
# create intermediate directories if needed
intermediate_dir = File.dirname(guestpath)
if !comm.test("test -d \"#{intermediate_dir}\"", sudo: true)
comm.sudo("mkdir -p \"#{intermediate_dir}\"")
end

# finally make the symlink
comm.sudo("ln -s \"/Volumes/VMware Shared Folders/#{name}\" \"#{guestpath}\"")
end
end

# Check if firmlink is provided by the system
#
# @param [String] firmlink Firmlink path
# @return [Boolean]
def self.system_firmlink?(firmlink)
if !@_firmlinks
if File.exist?("/usr/share/firmlinks")
@_firmlinks = File.readlines("/usr/share/firmlinks").map do |line|
line.split.first
end
else
@_firmlinks = []
end
end
firmlink = "/#{firmlink}" if !firmlink.start_with?("/")
@_firmlinks.include?(firmlink)
end

# @private
# Reset the cached values for capability. This is not considered a public
# API and should only be used for testing.
def self.reset!
instance_variables.each(&method(:remove_instance_variable))
end
end
end
end
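For orientation, the hunk above builds /etc/synthetic.conf entries so APFS-rooted guests (macOS Catalina and later) can expose VMware shared folders at read-only root paths. A rough Ruby sketch of the entry it writes, using a made-up share name and guest path rather than values from this diff:

  # Sketch only: the shape of the synthetic.conf entry assembled by the capability.
  firmlink   = "projects"                          # first component of the requested guest path
  guestpath  = "/System/Volumes/Data/projects"     # writable location inside the APFS data volume
  share_line = "#{firmlink}\t#{guestpath}"         # tab-separated /etc/synthetic.conf entry
  # Appending share_line to /etc/synthetic.conf and re-running apfs.util -B asks
  # macOS to materialize a /projects firmlink that points into the data volume.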
@@ -1,42 +0,0 @@
require "log4r"

module VagrantPlugins
module GuestLinux
module Cap
class Reboot
MAX_REBOOT_RETRY_DURATION = 120

def self.reboot(machine)
@logger = Log4r::Logger.new("vagrant::linux::reboot")
reboot_script = "reboot"

comm = machine.communicate

@logger.debug("Issuing reboot command for guest")
comm.sudo(reboot_script)

machine.ui.info(I18n.t("vagrant.guests.capabilities.rebooting"))

@logger.debug("Waiting for machine to finish rebooting")

wait_remaining = MAX_REBOOT_RETRY_DURATION
begin
wait_for_reboot(machine)
rescue Vagrant::Errors::MachineGuestNotReady => e
raise if wait_remaining < 0
@logger.warn("Machine not ready, cannot start reboot yet. Trying again")
sleep(5)
wait_remaining -= 5
retry
end
end

def self.wait_for_reboot(machine)
while !machine.guest.ready?
sleep 10
end
end
end
end
end
end
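For scale on the retry loop above: MAX_REBOOT_RETRY_DURATION is 120 seconds and each rescued MachineGuestNotReady sleeps 5 seconds before retrying, so the capability tolerates roughly 120 / 5 ≈ 24 failed attempts (about two minutes) before re-raising; once the reboot command is accepted, wait_for_reboot polls machine.guest.ready? every 10 seconds with no separate timeout of its own.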
@@ -82,16 +82,6 @@ module VagrantPlugins
Cap::ReadIPAddress
end

guest_capability(:linux, :wait_for_reboot) do
require_relative "cap/reboot"
Cap::Reboot
end

guest_capability(:linux, :reboot) do
require_relative "cap/reboot"
Cap::Reboot
end

guest_capability(:linux, :remove_public_key) do
require_relative "cap/public_key"
Cap::PublicKey
@@ -5,7 +5,7 @@ module VagrantPlugins
def self.nfs_client_install(machine)
machine.communicate.sudo <<-EOH.gsub(/^ {12}/, '')
if command -v dnf; then
if `dnf info -q libnfs-utils > /dev/null 2>&1` ; then
dnf -y install nfs-utils libnfs-utils portmap
else
dnf -y install nfs-utils nfs-utils-lib portmap
@@ -15,7 +15,7 @@ module VagrantPlugins
fi

if test $(ps -o comm= 1) == 'systemd'; then
/bin/systemctl restart rpcbind nfs-server
/bin/systemctl restart rpcbind nfs
else
/etc/init.d/rpcbind restart
/etc/init.d/nfs restart
@@ -5,8 +5,8 @@ module VagrantPlugins
def self.change_host_name(machine, name)
comm = machine.communicate

basename = name.split(".", 2)[0]
if !comm.test('test "$(hostnamectl --static status)" = "#{basename}"', sudo: false)
if !comm.test("getent hosts '#{name}'", sudo: false)
basename = name.split(".", 2)[0]
comm.sudo <<-EOH.gsub(/^ {14}/, '')
hostnamectl set-hostname '#{basename}'
@@ -1,11 +0,0 @@
module VagrantPlugins
module HostDarwin
module Cap
class NFS
def self.nfs_exports_template(environment)
"nfs/exports_darwin"
end
end
end
end
end
@@ -55,11 +55,6 @@ module VagrantPlugins
require_relative "cap/configured_ip_addresses"
Cap::ConfiguredIPAddresses
end

host_capability("darwin", "nfs_exports_template") do
require_relative "cap/nfs"
Cap::NFS
end
end
end
end
@@ -77,7 +77,7 @@ module VagrantPlugins
sleep 0.5

nfs_cleanup("#{Process.uid} #{id}")
output = nfs_exports_content + output
output = "#{nfs_exports_content}\n#{output}"
nfs_write_exports(output)

if nfs_running?(nfs_check_command)
@@ -93,7 +93,7 @@ module VagrantPlugins
"systemctl --no-pager --no-legend --plain list-unit-files --all --type=service " \
"| grep #{nfs_service_name_systemd}").exit_code == 0
else
Vagrant::Util::Subprocess.execute(modinfo_path, "nfsd").exit_code == 0 ||
Vagrant::Util::Subprocess.execute("modinfo", "nfsd").exit_code == 0 ||
Vagrant::Util::Subprocess.execute("grep", "nfsd", "/proc/filesystems").exit_code == 0
end
end
@@ -261,24 +261,6 @@ module VagrantPlugins
Vagrant::Util::Subprocess.execute(*Shellwords.split(check_command)).exit_code == 0
end

def self.modinfo_path
if !defined?(@_modinfo_path)
@_modinfo_path = Vagrant::Util::Which.which("modinfo")

if @_modinfo_path.to_s.empty?
path = "/sbin/modinfo"
if File.file?(path)
@_modinfo_path = path
end
end

if @_modinfo_path.to_s.empty?
@_modinfo_path = "modinfo"
end
end
@_modinfo_path
end

# @private
# Reset the cached values for capability. This is not considered a public
# API and should only be used for testing.
@@ -1,168 +0,0 @@
require "log4r"
require "securerandom"

require "vagrant/util/numeric"

module VagrantPlugins
module Kernel_V2
class VagrantConfigDisk < Vagrant.plugin("2", :config)
#-------------------------------------------------------------------
# Config class for a given Disk
#-------------------------------------------------------------------

DEFAULT_DISK_TYPES = [:disk, :dvd, :floppy].freeze

# Note: This value is for internal use only
#
# @return [String]
attr_reader :id

# File name for the given disk. Defaults to a generated name that is:
#
# vagrant_<disk_type>_<short_uuid>
#
# @return [String]
attr_accessor :name

# Type of disk to create. Defaults to `:disk`
#
# @return [Symbol]
attr_accessor :type

# Size of disk to create
#
# @return [Integer,String]
attr_accessor :size

# Path to the location of the disk file (Optional)
#
# @return [String]
attr_accessor :file

# Determines if this disk is the _main_ disk, or an attachment.
# Defaults to true.
#
# @return [Boolean]
attr_accessor :primary

# Provider specific options
#
# @return [Hash]
attr_accessor :provider_config

def initialize(type)
@logger = Log4r::Logger.new("vagrant::config::vm::disk")

@type = type
@provider_config = {}

@name = UNSET_VALUE
@provider_type = UNSET_VALUE
@size = UNSET_VALUE
@primary = UNSET_VALUE
@file = UNSET_VALUE

# Internal options
@id = SecureRandom.uuid
end

# Helper method for storing provider specific config options
#
# Expected format is:
#
# - `provider__diskoption: value`
# - `{provider: {diskoption: value, otherdiskoption: value, ...}`
#
# Duplicates will be overriden
#
# @param [Hash] options
def add_provider_config(**options, &block)
current = {}
options.each do |k,v|
opts = k.to_s.split("__")

if opts.size == 2
current[opts[0].to_sym] = {opts[1].to_sym => v}
elsif v.is_a?(Hash)
current[k] = v
else
@logger.warn("Disk option '#{k}' found that does not match expected provider disk config schema.")
end
end

current = @provider_config.merge(current) if !@provider_config.empty?
@provider_config = current
end

def finalize!
# Ensure all config options are set to nil or default value if untouched
# by user
@type = :disk if @type == UNSET_VALUE
@size = nil if @size == UNSET_VALUE
@file = nil if @file == UNSET_VALUE

if @primary == UNSET_VALUE
@primary = false
end

if @name == UNSET_VALUE
if @primary
@name = "vagrant_primary"
else
@name = "name_#{@type.to_s}_#{@id.split("-").last}"
end
end

@provider_config = nil if @provider_config == {}
end

# @return [Array] array of strings of error messages from config option validation
def validate(machine)
errors = _detected_errors

# validate type with list of known disk types

if !DEFAULT_DISK_TYPES.include?(@type)
errors << I18n.t("vagrant.config.disk.invalid_type", type: @type,
types: DEFAULT_DISK_TYPES.join(', '))
end

if @size && !@size.is_a?(Integer)
if @size.is_a?(String)
@size = Vagrant::Util::Numeric.string_to_bytes(@size)
end

if !@size
errors << I18n.t("vagrant.config.disk.invalid_size", name: @name, machine: machine.name)
end
end

if @file
if !@file.is_a?(String)
errors << I18n.t("vagrant.config.disk.invalid_file_type", file: @file, machine: machine.name)
elsif !File.file?(@file)
errors << I18n.t("vagrant.config.disk.missing_file", file_path: @file,
name: @name, machine: machine.name)
end
end

if @provider_config
if !@provider_config.keys.include?(machine.provider_name)
machine.env.ui.warn(I18n.t("vagrant.config.disk.missing_provider",
machine: machine.name,
provider_name: machine.provider_name))
end
end

errors
end

# The String representation of this Disk.
#
# @return [String]
def to_s
"disk config"
end
end
end
end
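A hedged usage sketch for the disk config class above, as it would look in a Vagrantfile once the experimental disk_base_config flag is enabled; the box name and the virtualbox__hotpluggable option are illustrative assumptions, not values taken from this diff:

  Vagrant.configure("2") do |config|
    config.vm.box = "example/box"                      # hypothetical box
    # String sizes go through Vagrant::Util::Numeric.string_to_bytes during validate
    config.vm.disk :disk, size: "10GB", primary: true
    # provider__option keys are split on "__" and grouped under the provider name
    config.vm.disk :disk, size: "5GB", name: "storage", primary: false,
      virtualbox__hotpluggable: true                   # hypothetical provider option
  end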
@ -11,7 +11,6 @@ require "vagrant/util/experimental"
|
|||
|
||||
require File.expand_path("../vm_provisioner", __FILE__)
|
||||
require File.expand_path("../vm_subvm", __FILE__)
|
||||
require File.expand_path("../disk", __FILE__)
|
||||
|
||||
module VagrantPlugins
|
||||
module Kernel_V2
|
||||
|
@ -44,7 +43,6 @@ module VagrantPlugins
|
|||
attr_accessor :post_up_message
|
||||
attr_accessor :usable_port_range
|
||||
attr_reader :provisioners
|
||||
attr_reader :disks
|
||||
|
||||
# This is an experimental feature that isn't public yet.
|
||||
attr_accessor :clone
|
||||
|
@ -75,7 +73,6 @@ module VagrantPlugins
|
|||
@hostname = UNSET_VALUE
|
||||
@post_up_message = UNSET_VALUE
|
||||
@provisioners = []
|
||||
@disks = []
|
||||
@usable_port_range = UNSET_VALUE
|
||||
|
||||
# Internal state
|
||||
|
@ -126,28 +123,6 @@ module VagrantPlugins
|
|||
end
|
||||
end
|
||||
|
||||
# Merge defined disks
|
||||
other_disks = other.instance_variable_get(:@disks)
|
||||
new_disks = []
|
||||
@disks.each do |p|
|
||||
other_p = other_disks.find { |o| p.id == o.id }
|
||||
if other_p
|
||||
# there is an override. take it.
|
||||
other_p.config = p.config.merge(other_p.config)
|
||||
|
||||
# Remove duplicate disk config from other
|
||||
p = other_p
|
||||
other_disks.delete(other_p)
|
||||
end
|
||||
|
||||
# there is an override, merge it into the
|
||||
new_disks << p.dup
|
||||
end
|
||||
other_disks.each do |p|
|
||||
new_disks << p.dup
|
||||
end
|
||||
result.instance_variable_set(:@disks, new_disks)
|
||||
|
||||
# Merge the providers by prepending any configuration blocks we
|
||||
# have for providers onto the new configuration.
|
||||
other_providers = other.instance_variable_get(:@__providers)
|
||||
|
@ -409,38 +384,6 @@ module VagrantPlugins
|
|||
@__defined_vms[name].config_procs << [options[:config_version], block] if block
|
||||
end
|
||||
|
||||
# Stores disk config options from Vagrantfile
|
||||
#
|
||||
# @param [Symbol] type
|
||||
# @param [Hash] options
|
||||
# @param [Block] block
|
||||
def disk(type, **options, &block)
|
||||
disk_config = VagrantConfigDisk.new(type)
|
||||
|
||||
# Remove provider__option options before set_options, otherwise will
|
||||
# show up as missing setting
|
||||
# Extract provider hash options as well
|
||||
provider_options = {}
|
||||
options.delete_if do |p,o|
|
||||
if o.is_a?(Hash) || p.to_s.include?("__")
|
||||
provider_options[p] = o
|
||||
true
|
||||
end
|
||||
end
|
||||
|
||||
disk_config.set_options(options)
|
||||
|
||||
# Add provider config
|
||||
disk_config.add_provider_config(provider_options, &block)
|
||||
|
||||
if !Vagrant::Util::Experimental.feature_enabled?("disk_base_config")
|
||||
@logger.warn("Disk config defined, but experimental feature is not enabled. To use this feature, enable it with the experimental flag `disk_base_config`. Disk will not be added to internal config, and will be ignored.")
|
||||
return
|
||||
end
|
||||
|
||||
@disks << disk_config
|
||||
end
|
||||
|
||||
#-------------------------------------------------------------------
|
||||
# Internal methods, don't call these.
|
||||
#-------------------------------------------------------------------
|
||||
|
@ -604,10 +547,6 @@ module VagrantPlugins
|
|||
end
|
||||
end
|
||||
|
||||
@disks.each do |d|
|
||||
d.finalize!
|
||||
end
|
||||
|
||||
if !current_dir_shared && !@__synced_folders["/vagrant"]
|
||||
synced_folder(".", "/vagrant")
|
||||
end
|
||||
|
@ -670,7 +609,7 @@ module VagrantPlugins
|
|||
errors << I18n.t("vagrant.config.vm.clone_and_box")
|
||||
end
|
||||
|
||||
errors << I18n.t("vagrant.config.vm.hostname_invalid_characters", name: machine.name) if \
|
||||
errors << I18n.t("vagrant.config.vm.hostname_invalid_characters") if \
|
||||
@hostname && @hostname !~ /^[a-z0-9][-.a-z0-9]*$/i
|
||||
|
||||
if @box_version
|
||||
|
@ -809,26 +748,6 @@ module VagrantPlugins
|
|||
end
|
||||
end
|
||||
|
||||
# Validate disks
|
||||
# Check if there is more than one primrary disk defined and throw an error
|
||||
primary_disks = @disks.select { |d| d.primary && d.type == :disk }
|
||||
if primary_disks.size > 1
|
||||
errors << I18n.t("vagrant.config.vm.multiple_primary_disks_error",
|
||||
name: machine.name)
|
||||
end
|
||||
|
||||
disk_names = @disks.map { |d| d.name }
|
||||
duplicate_names = disk_names.detect{ |d| disk_names.count(d) > 1 }
|
||||
if duplicate_names && duplicate_names.size
|
||||
errors << I18n.t("vagrant.config.vm.multiple_disk_names_error",
|
||||
name: duplicate_names)
|
||||
end
|
||||
|
||||
@disks.each do |d|
|
||||
error = d.validate(machine)
|
||||
errors.concat error if !error.empty?
|
||||
end
|
||||
|
||||
# We're done with VM level errors so prepare the section
|
||||
errors = { "vm" => errors }
|
||||
|
||||
|
|
|
@@ -9,10 +9,7 @@ module VagrantPlugins
# Unique name for this provisioner
#
# Accepts a string, but is ultimately forced into a symbol in the top level method inside
# #Config::VM.provision method while being parsed from a Vagrantfile
#
# @return [Symbol]
# @return [String]
attr_reader :name

# Internal unique name for this provisioner
@@ -15,31 +15,19 @@ module VagrantPlugins
@executor = Executor::Local.new
end

# Returns the id for a new container built from `docker build`. Raises
# an exception if the id was unable to be captured from the output
#
# @return [String] id - ID matched from the docker build output.
def build(dir, **opts, &block)
args = Array(opts[:extra_args])
args << dir
opts = {with_stderr: true}
result = execute('docker', 'build', *args, opts, &block)
matches = result.match(/Successfully built (?<id>.+)$/i)
if !matches
# Check for the new output format 'writing image sha256...'
# In this case, docker builtkit is enabled. Its format is different
# from standard docker
@logger.warn("Could not determine docker container ID. Scanning for buildkit output instead")
matches = result.match(/writing image .+:(?<id>[0-9a-z]+) done/i)
if !matches
# This will cause a stack trace in Vagrant, but it is a bug
# if this happens anyways.
raise Errors::BuildError, result: result
end
args = Array(opts[:extra_args])
args << dir
result = execute('docker', 'build', *args, &block)
matches = result.scan(/Successfully built (.+)$/i)
if matches.empty?
# This will cause a stack trace in Vagrant, but it is a bug
# if this happens anyways.
raise "UNKNOWN OUTPUT: #{result}"
end

# Return the matched group `id`
matches[:id]
# Return the last match, and the capture of it
matches[-1][0]
end

def create(params, **opts, &block)
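The two regexes above cover both docker build output formats; a small sketch using sample strings that mirror the driver spec further down in this diff:

  classic  = "Successfully built 1a2b3c4d"
  buildkit = "writing image sha256:1a2b3c4d done"
  classic.match(/Successfully built (?<id>.+)$/i)[:id]            #=> "1a2b3c4d"
  buildkit.match(/writing image .+:(?<id>[0-9a-z]+) done/i)[:id]  #=> "1a2b3c4d"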
@@ -5,10 +5,6 @@ module VagrantPlugins
error_namespace("docker_provider.errors")
end

class BuildError < DockerError
error_key(:build_error)
end

class CommunicatorNonDocker < DockerError
error_key(:communicator_non_docker)
end
@@ -27,13 +27,7 @@ module VagrantPlugins
stdout: result.stdout
end

if opts
if opts[:with_stderr]
return result.stdout + " " + result.stderr
else
return result.stdout
end
end
result.stdout
end

def windows?
@@ -79,7 +79,6 @@ module VagrantPlugins
b.use ForwardPorts
b.use SetHostname
b.use SaneDefaults
b.use Disk
b.use Customize, "pre-boot"
b.use Boot
b.use Customize, "post-boot"
@@ -13,16 +13,7 @@ module VagrantPlugins
end

def call(env)
machine_folder = env[:machine].provider.driver.read_machine_folder

begin
clean_machine_folder(machine_folder)
rescue Errno::EPERM
raise Vagrant::Errors::MachineFolderNotAccessible,
name: env[:machine].name,
path: machine_folder
end

clean_machine_folder(env[:machine].provider.driver.read_machine_folder)
@app.call(env)
end
@@ -64,7 +64,6 @@ module VagrantPlugins
"5.1" => Version_5_1,
"5.2" => Version_5_2,
"6.0" => Version_6_0,
"6.1" => Version_6_1,
}

if @@version.start_with?("4.2.14")
@@ -1,16 +0,0 @@
require File.expand_path("../version_6_0", __FILE__)

module VagrantPlugins
module ProviderVirtualBox
module Driver
# Driver for VirtualBox 6.1.x
class Version_6_1 < Version_6_0
def initialize(uuid)
super

@logger = Log4r::Logger.new("vagrant::provider::virtualbox_6_1")
end
end
end
end
end
@@ -59,7 +59,6 @@ module VagrantPlugins
autoload :Version_5_1, File.expand_path("../driver/version_5_1", __FILE__)
autoload :Version_5_2, File.expand_path("../driver/version_5_2", __FILE__)
autoload :Version_6_0, File.expand_path("../driver/version_6_0", __FILE__)
autoload :Version_6_1, File.expand_path("../driver/version_6_1", __FILE__)
end

module Model
@ -1,5 +1,4 @@
|
|||
require_relative "../../../errors"
|
||||
require_relative "../pip/pip"
|
||||
|
||||
module VagrantPlugins
|
||||
module Ansible
|
||||
|
@ -8,31 +7,15 @@ module VagrantPlugins
|
|||
module Arch
|
||||
module AnsibleInstall
|
||||
|
||||
def self.ansible_install(machine, install_mode, ansible_version, pip_args, pip_install_cmd = "")
|
||||
case install_mode
|
||||
when :pip
|
||||
pip_setup machine, pip_install_cmd
|
||||
Pip::pip_install machine, "ansible", ansible_version, pip_args, true
|
||||
|
||||
when :pip_args_only
|
||||
pip_setup machine, pip_install_cmd
|
||||
Pip::pip_install machine, "", "", pip_args, false
|
||||
|
||||
def self.ansible_install(machine, install_mode, ansible_version, pip_args)
|
||||
if install_mode != :default
|
||||
raise Ansible::Errors::AnsiblePipInstallIsNotSupported
|
||||
else
|
||||
machine.communicate.sudo "pacman -Syy --noconfirm"
|
||||
machine.communicate.sudo "pacman -S --noconfirm ansible"
|
||||
end
|
||||
end
|
||||
|
||||
private
|
||||
|
||||
def self.pip_setup(machine, pip_install_cmd = "")
|
||||
machine.communicate.sudo "pacman -Syy --noconfirm"
|
||||
machine.communicate.sudo "pacman -S --noconfirm base-devel curl git python"
|
||||
|
||||
Pip::get_pip machine, pip_install_cmd
|
||||
end
|
||||
|
||||
end
|
||||
end
|
||||
end
|
||||
|
|
|
@ -7,7 +7,8 @@ module VagrantPlugins
|
|||
module Debian
|
||||
module AnsibleInstall
|
||||
|
||||
def self.ansible_install(machine, install_mode, ansible_version, pip_args, pip_install_cmd = "")
|
||||
|
||||
def self.ansible_install(machine, install_mode, ansible_version, pip_args, pip_install_cmd="")
|
||||
case install_mode
|
||||
when :pip
|
||||
pip_setup machine, pip_install_cmd
|
||||
|
@ -32,12 +33,12 @@ INLINE_CRIPT
|
|||
|
||||
machine.communicate.sudo install_backports_if_wheezy_release
|
||||
machine.communicate.sudo "apt-get update -y -qq"
|
||||
machine.communicate.sudo "DEBIAN_FRONTEND=noninteractive apt-get install -y -qq --option \"Dpkg::Options::=--force-confold\" ansible"
|
||||
machine.communicate.sudo "apt-get install -y -qq ansible"
|
||||
end
|
||||
|
||||
def self.pip_setup(machine, pip_install_cmd = "")
|
||||
def self.pip_setup(machine, pip_install_cmd="")
|
||||
machine.communicate.sudo "apt-get update -y -qq"
|
||||
machine.communicate.sudo "DEBIAN_FRONTEND=noninteractive apt-get install -y -qq --option \"Dpkg::Options::=--force-confold\" build-essential curl git libssl-dev libffi-dev python-dev"
|
||||
machine.communicate.sudo "apt-get install -y -qq build-essential curl git libssl-dev libffi-dev python-dev"
|
||||
Pip::get_pip machine, pip_install_cmd
|
||||
end
|
||||
|
||||
|
|
|
@ -8,7 +8,7 @@ module VagrantPlugins
|
|||
module Fedora
|
||||
module AnsibleInstall
|
||||
|
||||
def self.ansible_install(machine, install_mode, ansible_version, pip_args, pip_install_cmd = "")
|
||||
def self.ansible_install(machine, install_mode, ansible_version, pip_args, pip_install_cmd="")
|
||||
case install_mode
|
||||
when :pip
|
||||
pip_setup machine, pip_install_cmd
|
||||
|
@ -25,7 +25,7 @@ module VagrantPlugins
|
|||
|
||||
private
|
||||
|
||||
def self.pip_setup(machine, pip_install_cmd = "")
|
||||
def self.pip_setup(machine, pip_install_cmd="")
|
||||
rpm_package_manager = Facts::rpm_package_manager(machine)
|
||||
|
||||
machine.communicate.sudo "#{rpm_package_manager} install -y curl gcc gmp-devel libffi-devel openssl-devel python-crypto python-devel python-dnf python-setuptools redhat-rpm-config"
|
||||
|
|
|
@ -7,11 +7,11 @@ module VagrantPlugins
|
|||
module FreeBSD
|
||||
module AnsibleInstall
|
||||
|
||||
def self.ansible_install(machine, install_mode, ansible_version, pip_args, pip_install_cmd = "")
|
||||
def self.ansible_install(machine, install_mode, ansible_version, pip_args)
|
||||
if install_mode != :default
|
||||
raise Ansible::Errors::AnsiblePipInstallIsNotSupported
|
||||
else
|
||||
machine.communicate.sudo "pkg install -qy py36-ansible"
|
||||
machine.communicate.sudo "yes | pkg install ansible"
|
||||
end
|
||||
end
|
||||
|
||||
|
|
|
@ -16,23 +16,19 @@ module VagrantPlugins
|
|||
end
|
||||
|
||||
args_array = [pip_args, upgrade_arg, "#{package}#{version_arg}"]
|
||||
args_array.reject! { |a| a.nil? || a.empty? }
|
||||
|
||||
pip_install = "pip install"
|
||||
pip_install += " #{args_array.join(' ')}" unless args_array.empty?
|
||||
|
||||
machine.communicate.sudo pip_install
|
||||
machine.communicate.sudo "pip install #{args_array.join(' ')}"
|
||||
end
|
||||
|
||||
def self.get_pip(machine, pip_install_cmd = DEFAULT_PIP_INSTALL_CMD)
|
||||
def self.get_pip(machine, pip_install_cmd=DEFAULT_PIP_INSTALL_CMD)
|
||||
# The objective here is to get pip either by default
|
||||
# or by the argument passed in. The objective is not
|
||||
# or by the argument passed in. The objective is not
|
||||
# to circumvent the pip setup by passing in nothing.
|
||||
# Thus, we stick with the default on an empty string.
|
||||
# Typecast added in the check for safety.
|
||||
|
||||
if pip_install_cmd.to_s.empty?
|
||||
pip_install_cmd = DEFAULT_PIP_INSTALL_CMD
|
||||
pip_install_cmd=DEFAULT_PIP_INSTALL_CMD
|
||||
end
|
||||
|
||||
machine.ui.detail I18n.t("vagrant.provisioners.ansible.installing_pip")
|
||||
|
|
|
@ -8,7 +8,7 @@ module VagrantPlugins
|
|||
module RedHat
|
||||
module AnsibleInstall
|
||||
|
||||
def self.ansible_install(machine, install_mode, ansible_version, pip_args, pip_install_cmd = "")
|
||||
def self.ansible_install(machine, install_mode, ansible_version, pip_args, pip_install_cmd="")
|
||||
case install_mode
|
||||
when :pip
|
||||
pip_setup machine, pip_install_cmd
|
||||
|
@ -33,7 +33,7 @@ module VagrantPlugins
|
|||
machine.communicate.sudo "#{rpm_package_manager} -y --enablerepo=epel install ansible"
|
||||
end
|
||||
|
||||
def self.pip_setup(machine, pip_install_cmd = "")
|
||||
def self.pip_setup(machine, pip_install_cmd="")
|
||||
rpm_package_manager = Facts::rpm_package_manager(machine)
|
||||
|
||||
machine.communicate.sudo("#{rpm_package_manager} -y install curl gcc libffi-devel openssl-devel python-crypto python-devel python-setuptools")
|
||||
|
|
|
@ -1,4 +1,3 @@
|
|||
require_relative "../../../errors"
|
||||
|
||||
module VagrantPlugins
|
||||
module Ansible
|
||||
|
@ -7,7 +6,7 @@ module VagrantPlugins
|
|||
module SUSE
|
||||
module AnsibleInstall
|
||||
|
||||
def self.ansible_install(machine, install_mode, ansible_version, pip_args, pip_install_cmd = "")
|
||||
def self.ansible_install(machine, install_mode, ansible_version, pip_args)
|
||||
if install_mode != :default
|
||||
raise Ansible::Errors::AnsiblePipInstallIsNotSupported
|
||||
else
|
||||
|
|
|
@ -7,7 +7,7 @@ module VagrantPlugins
|
|||
module Ubuntu
|
||||
module AnsibleInstall
|
||||
|
||||
def self.ansible_install(machine, install_mode, ansible_version, pip_args, pip_install_cmd = "")
|
||||
def self.ansible_install(machine, install_mode, ansible_version, pip_args, pip_install_cmd="")
|
||||
if install_mode != :default
|
||||
Debian::AnsibleInstall::ansible_install machine, install_mode, ansible_version, pip_args, pip_install_cmd
|
||||
else
|
||||
|
@ -21,13 +21,13 @@ module VagrantPlugins
|
|||
unless machine.communicate.test("test -x \"$(which add-apt-repository)\"")
|
||||
machine.communicate.sudo """
|
||||
apt-get update -y -qq && \
|
||||
DEBIAN_FRONTEND=noninteractive apt-get install -y -qq software-properties-common --option \"Dpkg::Options::=--force-confold\"
|
||||
apt-get install -y -qq software-properties-common
|
||||
"""
|
||||
end
|
||||
machine.communicate.sudo """
|
||||
add-apt-repository ppa:ansible/ansible -y && \
|
||||
apt-get update -y -qq && \
|
||||
DEBIAN_FRONTEND=noninteractive apt-get install -y -qq ansible --option \"Dpkg::Options::=--force-confold\"
|
||||
apt-get install -y -qq ansible
|
||||
"""
|
||||
end
|
||||
|
||||
|
|
|
@@ -70,7 +70,7 @@ Vagrant.configure("2") do |config|
# information on available options.

# Enable provisioning with a shell script. Additional provisioners such as
# Ansible, Chef, Docker, Puppet and Salt are also available. Please see the
# Puppet, Chef, Ansible, Salt, and Docker are also available. Please see the
# documentation for more information about their specific syntax and use.
# config.vm.provision "shell", inline: <<-SHELL
#   apt-get update
@ -578,7 +578,7 @@ en:
|
|||
The specified checksum type is not supported by Vagrant: %{type}.
|
||||
Vagrant supports the following checksum types:
|
||||
|
||||
%{types}
|
||||
md5, sha1, sha256
|
||||
box_checksum_mismatch: |-
|
||||
The checksum of the downloaded box did not match the expected
|
||||
value. Please verify that you have the proper URL setup and that
|
||||
|
@ -964,15 +964,6 @@ en:
|
|||
If you believe this message is in error, please check the process
|
||||
listing for any "ruby" or "vagrant" processes and kill them. Then
|
||||
try again.
|
||||
machine_folder_not_accessible: |-
|
||||
Vagrant attempted to clean the machine folder for the machine '%{name}'
|
||||
but does not have permission to read the following path:
|
||||
|
||||
%{path}
|
||||
|
||||
Please ensure that Vagrant has the proper permissions to access the path
|
||||
above. You may need to grant this permission to the terminal emulator
|
||||
running Vagrant as well.
|
||||
machine_guest_not_ready: |-
|
||||
Guest-specific operations were attempted on a machine that is not
|
||||
ready for guest communication. This should not happen and a bug
|
||||
|
@ -1801,17 +1792,6 @@ en:
|
|||
# Translations for config validation errors
|
||||
#-------------------------------------------------------------------------------
|
||||
config:
|
||||
disk:
|
||||
invalid_type: |-
|
||||
Disk type '%{type}' is not a valid type. Please pick one of the following supported disk types: %{types}
|
||||
invalid_size: |-
|
||||
Config option 'size' for disk '%{name}' on guest '%{machine}' is not an integer
|
||||
invalid_file_type: |-
|
||||
Disk config option 'file' for '%{machine}' is not a string.
|
||||
missing_file: |-
|
||||
Disk file '%{file_path}' for disk '%{name}' on machine '%{machine}' does not exist.
|
||||
missing_provider: |-
|
||||
Guest '%{machine}' using provider '%{provider_name}' has provider specific config options for a provider other than '%{provider_name}'. These provider config options will be ignored for this guest
|
||||
common:
|
||||
bad_field: "The following settings shouldn't exist: %{fields}"
|
||||
chef:
|
||||
|
@ -1908,14 +1888,10 @@ en:
|
|||
box_missing: "A box must be specified."
|
||||
clone_and_box: "Only one of clone or box can be specified."
|
||||
hostname_invalid_characters: |-
|
||||
The hostname set for the VM '%{name}' should only contain letters, numbers,
|
||||
The hostname set for the VM should only contain letters, numbers,
|
||||
hyphens or dots. It cannot start with a hyphen or dot.
|
||||
ignore_provider_config: |-
|
||||
Ignoring provider config for validation...
|
||||
multiple_primary_disks_error: |-
|
||||
There are more than one primary disks defined for guest '%{name}'. Please ensure that only one disk has been defined as a primary disk.
|
||||
multiple_disk_names_error: |-
|
||||
Duplicate disk names defined: '%{name}'. Disk names must be unique.
|
||||
name_invalid: |-
|
||||
The sub-VM name '%{name}' is invalid. Please don't use special characters.
|
||||
network_ip_ends_in_one: |-
|
||||
|
@ -2077,9 +2053,6 @@ en:
|
|||
No pushed snapshot found!
|
||||
|
||||
Use `vagrant snapshot push` to push a snapshot to restore to.
|
||||
save:
|
||||
vm_not_created: |-
|
||||
Machine '%{name}' has not been created yet, and therefore cannot save snapshots. Skipping...
|
||||
status:
|
||||
aborted: |-
|
||||
The VM is in an aborted state. This means that it was abruptly
|
||||
|
@ -2162,9 +2135,6 @@ en:
|
|||
runner:
|
||||
waiting_cleanup: "Waiting for cleanup before exiting..."
|
||||
exit_immediately: "Exiting immediately, without cleanup!"
|
||||
disk:
|
||||
provider_unsupported: |-
|
||||
Guest provider '%{provider}' does not support the disk feature, and will not use the disk configuration defined.
|
||||
vm:
|
||||
boot:
|
||||
booting: Booting VM...
|
||||
|
|
|
@ -159,8 +159,6 @@ en:
|
|||
run exits and doesn't keep running.
|
||||
|
||||
errors:
|
||||
build_error: |-
|
||||
Vagrant received unknown output from `docker build` while building a container: %{result}
|
||||
compose_lock_timeout: |-
|
||||
Vagrant encountered a timeout waiting for the docker compose driver
|
||||
to become available. Please try to run your command again. If you
|
||||
|
|
|
@@ -1,7 +0,0 @@
# VAGRANT-BEGIN: <%= user %> <%= uuid %>
<% folders.each do |dirs, opts| %>
<% dirs.each do |d| %>
<%= d %> <%=opts[:bsd__compiled_nfs_options] %> <%= ips.join(" ") %>
<% end %>
<% end %>
# VAGRANT-END: <%= user %> <%= uuid %>
@ -1,140 +0,0 @@
|
|||
require File.expand_path("../../../../../base", __FILE__)
|
||||
|
||||
require Vagrant.source_root.join("plugins/commands/box/command/outdated")
|
||||
|
||||
describe VagrantPlugins::CommandBox::Command::Outdated do
|
||||
include_context "unit"
|
||||
|
||||
let(:argv) { [] }
|
||||
let(:iso_env) do
|
||||
env = isolated_environment
|
||||
env.vagrantfile("")
|
||||
env.create_vagrant_env
|
||||
end
|
||||
|
||||
subject { described_class.new(argv, iso_env) }
|
||||
|
||||
let(:action_runner) { double("action_runner") }
|
||||
|
||||
before do
|
||||
allow(iso_env).to receive(:action_runner).and_return(action_runner)
|
||||
end
|
||||
|
||||
context "with force argument" do
|
||||
let(:argv) { ["--force"] }
|
||||
|
||||
it "passes along the force update option" do
|
||||
expect(action_runner).to receive(:run).with(any_args) { |action, **opts|
|
||||
expect(opts[:box_outdated_force]).to be_truthy
|
||||
true
|
||||
}
|
||||
subject.execute
|
||||
end
|
||||
end
|
||||
|
||||
context "with global argument" do
|
||||
let(:argv) { ["--global"] }
|
||||
|
||||
it "calls outdated_global" do
|
||||
expect(subject).to receive(:outdated_global)
|
||||
|
||||
subject.execute
|
||||
end
|
||||
|
||||
describe ".outdated_global" do
|
||||
let(:test_iso_env) { isolated_environment }
|
||||
|
||||
let(:md) {
|
||||
md = Vagrant::BoxMetadata.new(StringIO.new(<<-RAW))
|
||||
{
|
||||
"name": "foo",
|
||||
"versions": [
|
||||
{
|
||||
"version": "1.0"
|
||||
},
|
||||
{
|
||||
"version": "1.1",
|
||||
"providers": [
|
||||
{
|
||||
"name": "virtualbox",
|
||||
"url": "bar"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"version": "1.2",
|
||||
"providers": [
|
||||
{
|
||||
"name": "vmware",
|
||||
"url": "baz"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
RAW
|
||||
}
|
||||
|
||||
let(:collection) do
|
||||
collection = double("collection")
|
||||
allow(collection).to receive(:all).and_return([box])
|
||||
allow(collection).to receive(:find).and_return(box)
|
||||
collection
|
||||
end
|
||||
|
||||
context "when latest version is available for provider" do
|
||||
let(:box) do
|
||||
box_dir = test_iso_env.box3("foo", "1.0", :vmware)
|
||||
box = Vagrant::Box.new(
|
||||
"foo", :vmware, "1.0", box_dir, metadata_url: "foo")
|
||||
allow(box).to receive(:load_metadata).and_return(md)
|
||||
box
|
||||
end
|
||||
|
||||
it "displays the latest version" do
|
||||
allow(iso_env).to receive(:boxes).and_return(collection)
|
||||
|
||||
expect(I18n).to receive(:t).with(/box_outdated$/, hash_including(latest: "1.2"))
|
||||
|
||||
subject.outdated_global({})
|
||||
end
|
||||
end
|
||||
|
||||
context "when latest version isn't available for provider" do
|
||||
let(:box) do
|
||||
box_dir = test_iso_env.box3("foo", "1.0", :virtualbox)
|
||||
box = Vagrant::Box.new(
|
||||
"foo", :virtualbox, "1.0", box_dir, metadata_url: "foo")
|
||||
allow(box).to receive(:load_metadata).and_return(md)
|
||||
box
|
||||
end
|
||||
|
||||
it "displays the latest version for that provider" do
|
||||
allow(iso_env).to receive(:boxes).and_return(collection)
|
||||
|
||||
expect(I18n).to receive(:t).with(/box_outdated$/, hash_including(latest: "1.1"))
|
||||
|
||||
subject.outdated_global({})
|
||||
end
|
||||
end
|
||||
|
||||
context "when no versions are available for provider" do
|
||||
let(:box) do
|
||||
box_dir = test_iso_env.box3("foo", "1.0", :libvirt)
|
||||
box = Vagrant::Box.new(
|
||||
"foo", :libvirt, "1.0", box_dir, metadata_url: "foo")
|
||||
allow(box).to receive(:load_metadata).and_return(md)
|
||||
box
|
||||
end
|
||||
|
||||
it "displays up to date message" do
|
||||
allow(iso_env).to receive(:boxes).and_return(collection)
|
||||
|
||||
expect(I18n).to receive(:t).with(/box_up_to_date$/, hash_including(version: "1.0"))
|
||||
|
||||
subject.outdated_global({})
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
|
@ -52,15 +52,6 @@ describe VagrantPlugins::CloudCommand::Command::Publish do
|
|||
let(:argv) { ["vagrant/box", "1.0.0", "virtualbox"] }
|
||||
|
||||
it "shows help" do
|
||||
expect { subject.execute }.
|
||||
to raise_error(Vagrant::Errors::CLIInvalidUsage)
|
||||
end
|
||||
end
|
||||
|
||||
context "missing box file" do
|
||||
let(:argv) { ["vagrant/box", "1.0.0", "virtualbox", "/notreal/file.box"] }
|
||||
|
||||
it "raises an exception" do
|
||||
allow(File).to receive(:file?).and_return(false)
|
||||
expect { subject.execute }.
|
||||
to raise_error(Vagrant::Errors::BoxFileNotExist)
|
||||
|
|
|
@ -92,6 +92,8 @@ describe VagrantPlugins::CommandSnapshot::Command::Save do
|
|||
it "doesn't snapshot a non-existent machine" do
|
||||
machine.id = nil
|
||||
|
||||
expect(subject).to receive(:with_target_vms){}
|
||||
|
||||
expect(machine).to_not receive(:action)
|
||||
expect(subject.execute).to eq(0)
|
||||
end
|
||||
|
|
|
@ -16,9 +16,9 @@ describe 'VagrantPlugins::GuestAlpine::Cap::RSync' do
|
|||
VagrantPlugins::GuestAlpine::Plugin.components.guest_capabilities[:alpine].get(:rsync_install)
|
||||
end
|
||||
|
||||
it 'should install rsync with --update-cache flag' do
|
||||
it 'should install rsync' do
|
||||
# communicator.should_receive(:sudo).with('apk add rsync')
|
||||
expect(communicator).to receive(:sudo).with('apk add --update-cache rsync')
|
||||
expect(communicator).to receive(:sudo).with('apk add rsync')
|
||||
allow_message_expectations_on_nil
|
||||
described_class.rsync_install(machine)
|
||||
end
|
||||
|
|
|
@ -1,32 +0,0 @@
|
|||
require File.expand_path("../../../../base", __FILE__)
|
||||
|
||||
|
||||
describe VagrantPlugins::GuestAlpine::Plugin do
|
||||
let(:manager) { double("manager") }
|
||||
|
||||
before do
|
||||
allow(Vagrant::Plugin::Manager).to receive(:instance).and_return(manager)
|
||||
end
|
||||
|
||||
context "when vagrant-alpine plugin is not installed" do
|
||||
before do
|
||||
allow(manager).to receive(:installed_plugins).and_return({})
|
||||
end
|
||||
|
||||
it "should not display a warning" do
|
||||
expect($stderr).to_not receive(:puts)
|
||||
VagrantPlugins::GuestAlpine::Plugin.check_community_plugin
|
||||
end
|
||||
end
|
||||
|
||||
context "when vagrant-alpine plugin is installed" do
|
||||
before do
|
||||
allow(manager).to receive(:installed_plugins).and_return({ "vagrant-alpine" => {} })
|
||||
end
|
||||
|
||||
it "should display a warning" do
|
||||
expect($stderr).to receive(:puts).with(/vagrant plugin uninstall vagrant-alpine/)
|
||||
VagrantPlugins::GuestAlpine::Plugin.check_community_plugin
|
||||
end
|
||||
end
|
||||
end
|
|
@ -1,167 +0,0 @@
|
|||
require_relative "../../../../base"
|
||||
|
||||
describe "VagrantPlugins::GuestDarwin::Cap::MountVmwareSharedFolder" do
|
||||
let(:described_class) do
|
||||
VagrantPlugins::GuestDarwin::Plugin
|
||||
.components
|
||||
.guest_capabilities[:darwin]
|
||||
.get(:mount_vmware_shared_folder)
|
||||
end
|
||||
|
||||
let(:machine) { double("machine", communicate: communicator, id: "MACHINE_ID") }
|
||||
let(:communicator) { double("communicator") }
|
||||
|
||||
before do
|
||||
allow(communicator).to receive(:test)
|
||||
allow(communicator).to receive(:sudo)
|
||||
allow(VagrantPlugins::GuestDarwin::Plugin).to receive(:action_hook)
|
||||
end
|
||||
|
||||
describe ".mount_vmware_shared_folder" do
|
||||
let(:name) { "-vagrant" }
|
||||
let(:guestpath) { "/vagrant" }
|
||||
let(:options) { {} }
|
||||
|
||||
before do
|
||||
allow(described_class).to receive(:system_firmlink?)
|
||||
described_class.reset!
|
||||
end
|
||||
|
||||
after { described_class.
|
||||
mount_vmware_shared_folder(machine, name, guestpath, options) }
|
||||
|
||||
context "with APFS root container" do
|
||||
before do
|
||||
expect(communicator).to receive(:test).with("test -d /System/Volumes/Data").and_return(true)
|
||||
end
|
||||
|
||||
it "should check for existing entry" do
|
||||
expect(communicator).to receive(:test).with(/synthetic\.conf/)
|
||||
end
|
||||
|
||||
it "should register an action hook" do
|
||||
expect(VagrantPlugins::GuestDarwin::Plugin).to receive(:action_hook).with(:apfs_firmlinks, :after_synced_folders)
|
||||
end
|
||||
|
||||
context "with guest path within existing directory" do
|
||||
let(:guestpath) { "/Users/vagrant/workspace" }
|
||||
|
||||
it "should test if guest path is a symlink" do
|
||||
expect(communicator).to receive(:test).with(/test -L/)
|
||||
end
|
||||
|
||||
it "should remove guest path if it is a symlink" do
|
||||
expect(communicator).to receive(:test).with(/test -L/).and_return(true)
|
||||
expect(communicator).to receive(:sudo).with(/rm -f/)
|
||||
end
|
||||
|
||||
it "should not test if guest path is a directory if guest path is symlink" do
|
||||
expect(communicator).to receive(:test).with(/test -L/).and_return(true)
|
||||
expect(communicator).not_to receive(:test).with(/test -d/)
|
||||
end
|
||||
|
||||
it "should test if guest path is directory if not a symlink" do
|
||||
expect(communicator).to receive(:test).with(/test -d/)
|
||||
end
|
||||
|
||||
it "should remove guest path if it is a directory" do
|
||||
expect(communicator).to receive(:test).with(/test -d/).and_return(true)
|
||||
expect(communicator).to receive(:sudo).with(/rm -Rf/)
|
||||
end
|
||||
|
||||
it "should create the symlink to the vmware folder" do
|
||||
expect(communicator).to receive(:sudo).with(/ln -s/)
|
||||
end
|
||||
|
||||
it "should create the symlink within the writable APFS container" do
|
||||
expect(communicator).to receive(:sudo).with(%r{ln -s .+/System/Volumes/Data.+})
|
||||
end
|
||||
|
||||
it "should register an action hook" do
|
||||
expect(VagrantPlugins::GuestDarwin::Plugin).to receive(:action_hook).with(:apfs_firmlinks, :after_synced_folders)
|
||||
end
|
||||
|
||||
context "when firmlink is provided by the system" do
|
||||
before { expect(described_class).to receive(:system_firmlink?).and_return(true) }
|
||||
|
||||
it "should not register an action hook" do
|
||||
expect(VagrantPlugins::GuestDarwin::Plugin).not_to receive(:action_hook).with(:apfs_firmlinks, :after_synced_folders)
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
context "with non-APFS root container" do
|
||||
before do
|
||||
expect(communicator).to receive(:test).with("test -d /System/Volumes/Data").and_return(false)
|
||||
end
|
||||
|
||||
it "should test if guest path is a symlink" do
|
||||
expect(communicator).to receive(:test).with(/test -L/)
|
||||
end
|
||||
|
||||
it "should remove guest path if it is a symlink" do
|
||||
expect(communicator).to receive(:test).with(/test -L/).and_return(true)
|
||||
expect(communicator).to receive(:sudo).with(/rm -f/)
|
||||
end
|
||||
|
||||
it "should not test if guest path is a directory if guest path is symlink" do
|
||||
expect(communicator).to receive(:test).with(/test -L/).and_return(true)
|
||||
expect(communicator).not_to receive(:test).with(/test -d/)
|
||||
end
|
||||
|
||||
it "should test if guest path is directory if not a symlink" do
|
||||
expect(communicator).to receive(:test).with(/test -d/)
|
||||
end
|
||||
|
||||
it "should remove guest path if it is a directory" do
|
||||
expect(communicator).to receive(:test).with(/test -d/).and_return(true)
|
||||
expect(communicator).to receive(:sudo).with(/rm -Rf/)
|
||||
end
|
||||
|
||||
it "should create the symlink to the vmware folder" do
|
||||
expect(communicator).to receive(:sudo).with(/ln -s/)
|
||||
end
|
||||
|
||||
it "should not register an action hook" do
|
||||
expect(VagrantPlugins::GuestDarwin::Plugin).not_to receive(:action_hook).with(:apfs_firmlinks, :after_synced_folders)
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
describe ".system_firmlink?" do
|
||||
before { described_class.reset! }
|
||||
|
||||
context "when file does not exist" do
|
||||
before { allow(File).to receive(:exist?).with("/usr/share/firmlinks").and_return(false) }
|
||||
|
||||
it "should always return false" do
|
||||
expect(described_class.system_firmlink?("test")).to be_falsey
|
||||
end
|
||||
end
|
||||
|
||||
context "when file does exist" do
|
||||
let(:content) {
|
||||
["/Users\tUsers",
|
||||
"/usr/local\tusr/local"]
|
||||
}
|
||||
|
||||
before do
|
||||
expect(File).to receive(:exist?).with("/usr/share/firmlinks").and_return(true)
|
||||
expect(File).to receive(:readlines).with("/usr/share/firmlinks").and_return(content)
|
||||
end
|
||||
|
||||
it "should return true when firmlink exists" do
|
||||
expect(described_class.system_firmlink?("/Users")).to be_truthy
|
||||
end
|
||||
|
||||
it "should return true when firmlink is not prefixed with /" do
|
||||
expect(described_class.system_firmlink?("Users")).to be_truthy
|
||||
end
|
||||
|
||||
it "should return false when firmlink does not exist" do
|
||||
expect(described_class.system_firmlink?("/testing")).to be_falsey
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
|
@ -1,50 +0,0 @@
|
|||
require_relative "../../../../base"
|
||||
|
||||
require Vagrant.source_root.join("plugins/guests/linux/cap/reboot")
|
||||
|
||||
describe "VagrantPlugins::GuestLinux::Cap::Reboot" do
|
||||
let(:described_class) do
|
||||
VagrantPlugins::GuestLinux::Plugin.components.guest_capabilities[:linux].get(:wait_for_reboot)
|
||||
end
|
||||
|
||||
let(:machine) { double("machine") }
|
||||
let(:guest) { double("guest") }
|
||||
let(:communicator) { VagrantTests::DummyCommunicator::Communicator.new(machine) }
|
||||
let(:ui) { double("ui") }
|
||||
|
||||
before do
|
||||
allow(machine).to receive(:communicate).and_return(communicator)
|
||||
allow(machine).to receive(:guest).and_return(guest)
|
||||
allow(machine.guest).to receive(:ready?).and_return(true)
|
||||
allow(machine).to receive(:ui).and_return(ui)
|
||||
allow(ui).to receive(:info)
|
||||
end
|
||||
|
||||
after do
|
||||
communicator.verify_expectations!
|
||||
end
|
||||
|
||||
describe ".reboot" do
|
||||
it "reboots the vm" do
|
||||
allow(communicator).to receive(:execute)
|
||||
|
||||
expect(communicator).to receive(:execute).with(/reboot/, nil).and_return(0)
|
||||
expect(described_class).to receive(:wait_for_reboot)
|
||||
|
||||
described_class.reboot(machine)
|
||||
end
|
||||
|
||||
context "user output" do
|
||||
before do
|
||||
allow(communicator).to receive(:execute)
|
||||
allow(described_class).to receive(:wait_for_reboot)
|
||||
end
|
||||
|
||||
after { described_class.reboot(machine) }
|
||||
|
||||
it "sends message to user that guest is rebooting" do
|
||||
expect(ui).to receive(:info)
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
|
@ -24,7 +24,7 @@ describe "VagrantPlugins::GuestRedHat::Cap:NFSClient" do
|
|||
it "installs nfs client" do
|
||||
cap.nfs_client_install(machine)
|
||||
expect(comm.received_commands[0]).to match(/install nfs-utils/)
|
||||
expect(comm.received_commands[0]).to match(/\/bin\/systemctl restart rpcbind nfs-server/)
|
||||
expect(comm.received_commands[0]).to match(/\/bin\/systemctl restart rpcbind nfs/)
|
||||
end
|
||||
end
|
||||
end
|
||||
|
|
|
@ -25,14 +25,14 @@ describe "VagrantPlugins::GuestSUSE::Cap::ChangeHostName" do
|
|||
let(:basename) { "banana-rama" }
|
||||
|
||||
it "sets the hostname" do
|
||||
comm.stub_command('test "$(hostnamectl --static status)" = "#{basename}"', exit_code: 1)
|
||||
comm.stub_command("getent hosts '#{name}'", exit_code: 1)
|
||||
|
||||
cap.change_host_name(machine, name)
|
||||
expect(comm.received_commands[1]).to match(/hostnamectl set-hostname '#{basename}'/)
|
||||
end
|
||||
|
||||
it "does not change the hostname if already set" do
|
||||
comm.stub_command('test "$(hostnamectl --static status)" = "#{basename}"', exit_code: 0)
|
||||
comm.stub_command("getent hosts '#{name}'", exit_code: 0)
|
||||
|
||||
cap.change_host_name(machine, name)
|
||||
expect(comm.received_commands.size).to eq(1)
|
||||
|
|
|
@ -21,9 +21,6 @@ describe VagrantPlugins::HostBSD::Cap::NFS do
|
|||
allow(described_class).to receive(:system)
|
||||
allow(File).to receive(:writable?).with("/etc/exports")
|
||||
allow(ui).to receive(:info)
|
||||
|
||||
allow(Vagrant::Util::Subprocess).to receive(:execute).with("nfsd", "checkexports").
|
||||
and_return(Vagrant::Util::Subprocess::Result.new(0, "", ""))
|
||||
end
|
||||
|
||||
it "should execute successfully when no folders are defined" do
|
||||
|
|
|
@ -1,17 +0,0 @@
|
|||
require_relative "../../../../base"
|
||||
|
||||
require_relative "../../../../../../plugins/hosts/darwin/cap/nfs"
|
||||
|
||||
describe VagrantPlugins::HostDarwin::Cap::NFS do
|
||||
include_context "unit"
|
||||
|
||||
let(:subject){ VagrantPlugins::HostDarwin::Cap::NFS }
|
||||
|
||||
it "exists" do
|
||||
expect(subject).to_not be(nil)
|
||||
end
|
||||
|
||||
it "should use nfs/exports_darwin as its template" do
|
||||
expect(subject.nfs_exports_template(nil)).to eq("nfs/exports_darwin")
|
||||
end
|
||||
end
|
|
@ -188,7 +188,7 @@ EOH
|
|||
:linux__nfs_options=>["rw","all_squash"]}}
|
||||
valid_id = SecureRandom.uuid
|
||||
content =<<-EOH
|
||||
# VAGRANT-BEGIN: #{Process.uid} #{valid_id}
|
||||
\n# VAGRANT-BEGIN: #{Process.uid} #{valid_id}
|
||||
"/home/vagrant" 127.0.0.1(rw,all_squash,anonuid=,anongid=,fsid=)
|
||||
"/newhome/otherproject" 127.0.0.1(rw,all_squash,anonuid=,anongid=,fsid=)
|
||||
# VAGRANT-END: #{Process.uid} #{valid_id}
|
||||
|
@ -337,50 +337,4 @@ EOH
|
|||
end
|
||||
end
|
||||
end
|
||||
|
||||
describe ".modinfo_path" do
|
||||
let(:cap){ VagrantPlugins::HostLinux::Cap::NFS }
|
||||
|
||||
context "with modinfo on PATH" do
|
||||
before do
|
||||
expect(Vagrant::Util::Which).to receive(:which).with("modinfo").and_return("/usr/bin/modinfo")
|
||||
end
|
||||
|
||||
it "should use full path to modinfo" do
|
||||
expect(cap.modinfo_path).to eq("/usr/bin/modinfo")
|
||||
end
|
||||
end
|
||||
|
||||
context "with modinfo at /sbin/modinfo" do
|
||||
before do
|
||||
expect(Vagrant::Util::Which).to receive(:which).with("modinfo").and_return(nil)
|
||||
expect(File).to receive(:file?).with("/sbin/modinfo").and_return(true)
|
||||
end
|
||||
|
||||
it "should use /sbin/modinfo" do
|
||||
expect(cap.modinfo_path).to eq("/sbin/modinfo")
|
||||
end
|
||||
end
|
||||
|
||||
context "modinfo not found" do
|
||||
before do
|
||||
expect(Vagrant::Util::Which).to receive(:which).with("modinfo").and_return(nil)
|
||||
expect(File).to receive(:file?).with("/sbin/modinfo").and_return(false)
|
||||
end
|
||||
|
||||
it "should use modinfo" do
|
||||
expect(cap.modinfo_path).to eq("modinfo")
|
||||
end
|
||||
end
|
||||
|
||||
context "with cached value for modinfo_path" do
|
||||
before do
|
||||
cap.instance_variable_set(:@_modinfo_path, "/usr/local/bin/modinfo")
|
||||
end
|
||||
|
||||
it "should use cached value" do
|
||||
expect(cap.modinfo_path).to eq("/usr/local/bin/modinfo")
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
|
|
|
@ -1,56 +0,0 @@
|
|||
require File.expand_path("../../../../base", __FILE__)
|
||||
|
||||
require Vagrant.source_root.join("plugins/kernel_v2/config/disk")
|
||||
|
||||
describe VagrantPlugins::Kernel_V2::VagrantConfigDisk do
|
||||
include_context "unit"
|
||||
|
||||
let(:type) { :disk }
|
||||
|
||||
subject { described_class.new(type) }
|
||||
|
||||
let(:machine) { double("machine") }
|
||||
|
||||
def assert_invalid
|
||||
errors = subject.validate(machine)
|
||||
if !errors.empty? { |v| !v.empty? }
|
||||
raise "No errors: #{errors.inspect}"
|
||||
end
|
||||
end
|
||||
|
||||
def assert_valid
|
||||
errors = subject.validate(machine)
|
||||
if !errors.empty? { |v| v.empty? }
|
||||
raise "Errors: #{errors.inspect}"
|
||||
end
|
||||
end
|
||||
|
||||
before do
|
||||
env = double("env")
|
||||
|
||||
subject.name = "foo"
|
||||
subject.size = 100
|
||||
end
|
||||
|
||||
describe "with defaults" do
|
||||
it "is valid with test defaults" do
|
||||
subject.finalize!
|
||||
assert_valid
|
||||
end
|
||||
|
||||
it "sets a disk type" do
|
||||
subject.finalize!
|
||||
expect(subject.type).to eq(type)
|
||||
end
|
||||
|
||||
it "defaults to non-primray disk" do
|
||||
subject.finalize!
|
||||
expect(subject.primary).to eq(false)
|
||||
end
|
||||
end
|
||||
|
||||
describe "defining a new config that needs to match internal restraints" do
|
||||
before do
|
||||
end
|
||||
end
|
||||
end
|
|
@ -549,58 +549,6 @@ describe VagrantPlugins::Kernel_V2::VMConfig do
|
|||
end
|
||||
end
|
||||
|
||||
describe "#disk" do
|
||||
before(:each) do
|
||||
allow(Vagrant::Util::Experimental).to receive(:feature_enabled?).
|
||||
with("disk_base_config").and_return("true")
|
||||
end
|
||||
|
||||
it "stores the disks" do
|
||||
subject.disk(:disk, size: 100)
|
||||
subject.disk(:disk, size: 1000, primary: false, name: "storage")
|
||||
subject.finalize!
|
||||
|
||||
assert_valid
|
||||
|
||||
d = subject.disks
|
||||
expect(d.length).to eql(2)
|
||||
expect(d[0].size).to eql(100)
|
||||
expect(d[1].size).to eql(1000)
|
||||
expect(d[1].name).to eql("storage")
|
||||
end
|
||||
|
||||
it "raises an error with duplicate names" do
|
||||
subject.disk(:disk, size: 100, name: "foo")
|
||||
subject.disk(:disk, size: 1000, name: "foo", primary: false)
|
||||
subject.finalize!
|
||||
assert_invalid
|
||||
end
|
||||
|
||||
it "does not merge duplicate disks" do
|
||||
subject.disk(:disk, size: 1000, primary: false, name: "storage")
|
||||
subject.disk(:disk, size: 1000, primary: false, name: "backup")
|
||||
|
||||
merged = subject.merge(subject)
|
||||
merged_disks = merged.disks
|
||||
|
||||
expect(merged_disks.length).to eql(2)
|
||||
end
|
||||
|
||||
it "ignores non-overriding runs" do
|
||||
subject.disk(:disk, name: "foo")
|
||||
|
||||
other = described_class.new
|
||||
other.disk(:disk, name: "bar", primary: false)
|
||||
|
||||
merged = subject.merge(other)
|
||||
merged_disks = merged.disks
|
||||
|
||||
expect(merged_disks.length).to eql(2)
|
||||
expect(merged_disks[0].name).to eq("foo")
|
||||
expect(merged_disks[1].name).to eq("bar")
|
||||
end
|
||||
end
|
||||
|
||||
describe "#synced_folder(s)" do
|
||||
it "defaults to sharing the current directory" do
|
||||
subject.finalize!
|
||||
|
|
|
@@ -324,11 +324,8 @@ describe VagrantPlugins::DockerProvider::Action::PrepareNetworks do

describe "#process_public_network" do
let(:options) { {:ip=>"172.30.130.2", :subnet=>"172.30.0.0/16", :driver=>"bridge", :id=>"30e017d5-488f-5a2f-a3ke-k8dce8246b60"} }
let(:addr) { double("addr", ip: true, ip_address: "192.168.1.139") }
let(:netmask) { double("netmask", ip_unpack: ["255.255.255.0"]) }
let(:ipaddr) { double("ipaddr", prefix: 22, succ: "10.1.10.2", ipv4?: true,
ipv6?: false, to_i: 4294967040, name: "ens20u1u2",
addr: addr, netmask: netmask) }
ipv6?: false, to_i: 4294967040) }

it "raises an error if there are no network interfaces" do
expect(subject).to receive(:list_interfaces).and_return([])

@@ -346,12 +343,7 @@ describe VagrantPlugins::DockerProvider::Action::PrepareNetworks do
allow(driver).to receive(:network_containing_address).
with("10.1.10.2").and_return("vagrant_network_public")

# mock the call to PrepareNetworks.list_interfaces so that we don't depend
# on the current network interfaces
allow(subject).to receive(:list_interfaces).
and_return([ipaddr])

network_name, _network_options = subject.process_public_network(options, {}, env)
network_name, network_options = subject.process_public_network(options, {}, env)
expect(network_name).to eq("vagrant_network_public")
end
end
@@ -152,27 +152,6 @@ describe VagrantPlugins::DockerProvider::Driver do
].to_json }


describe '#build' do
let(:result) { "Successfully built 1a2b3c4d" }
let(:buildkit_result) { "writing image sha256:1a2b3c4d done" }
let(:cid) { "1a2b3c4d" }

it "builds a container with standard docker" do
allow(subject).to receive(:execute).and_return(result)

container_id = subject.build("/tmp/fakedir")

expect(container_id).to eq(cid)
end

it "builds a container with buildkit docker" do
allow(subject).to receive(:execute).and_return(buildkit_result)

container_id = subject.build("/tmp/fakedir")

expect(container_id).to eq(cid)
end
end

describe '#create' do
let(:params) { {
@@ -1,25 +0,0 @@
require_relative '../base'

describe VagrantPlugins::ProviderVirtualBox::Action::CleanMachineFolder do
let(:app) { double("app") }
let(:driver) { double("driver") }
let(:machine) { double("machine", provider: double("provider", driver: driver), name: "") }
let(:env) {
{ machine: machine }
}
let(:subject) { described_class.new(app, env) }

before do
allow(driver).to receive(:read_machine_folder)
end

context "machine folder is not accessible" do
before do
allow(subject).to receive(:clean_machine_folder).and_raise(Errno::EPERM)
end

it "raises an error" do
expect { subject.call(env) }.to raise_error(Vagrant::Errors::MachineFolderNotAccessible)
end
end
end
@@ -1,57 +0,0 @@
require_relative "../../../../../../base"
require_relative "../shared/pip_ansible_install_examples"

require Vagrant.source_root.join("plugins/provisioners/ansible/cap/guest/arch/ansible_install")

describe VagrantPlugins::Ansible::Cap::Guest::Arch::AnsibleInstall do
include_context "unit"

subject { VagrantPlugins::Ansible::Cap::Guest::Arch::AnsibleInstall }

let(:iso_env) do
# We have to create a Vagrantfile so there is a root path
env = isolated_environment
env.vagrantfile("")
env.create_vagrant_env
end

let(:machine) { iso_env.machine(iso_env.machine_names[0], :dummy) }
let(:communicator) { double("comm") }

before do
allow(machine).to receive(:communicate).and_return(communicator)
allow(communicator).to receive(:execute).and_return(true)
end

describe "#pip_setup" do
it "install required Arch packages and call Cap::Guest::Pip::get_pip" do
pip_install_cmd = "foo"

expect(communicator).to receive(:sudo).once.ordered.
with("pacman -Syy --noconfirm")
expect(communicator).to receive(:sudo).once.ordered.
with("pacman -S --noconfirm base-devel curl git python")
expect(VagrantPlugins::Ansible::Cap::Guest::Pip).to receive(:get_pip).once.ordered.
with(machine, pip_install_cmd)

subject.pip_setup(machine, pip_install_cmd)
end
end

describe "#ansible_install" do

it_behaves_like "Ansible setup via pip"

describe "when install_mode is :default (or unknown)" do
it "installs ansible with 'pacman' package manager" do
expect(communicator).to receive(:sudo).once.ordered.
with("pacman -Syy --noconfirm")
expect(communicator).to receive(:sudo).once.ordered.
with("pacman -S --noconfirm ansible")

subject.ansible_install(machine, :default, "", "", "")
end
end
end

end
@@ -1,50 +0,0 @@
require_relative "../../../../../../base"
require_relative "../shared/pip_ansible_install_examples"


require Vagrant.source_root.join("plugins/provisioners/ansible/cap/guest/debian/ansible_install")


describe VagrantPlugins::Ansible::Cap::Guest::Debian::AnsibleInstall do
include_context "unit"

subject { VagrantPlugins::Ansible::Cap::Guest::Debian::AnsibleInstall }

let(:iso_env) do
# We have to create a Vagrantfile so there is a root path
env = isolated_environment
env.vagrantfile("")
env.create_vagrant_env
end

let(:machine) { iso_env.machine(iso_env.machine_names[0], :dummy) }
let(:communicator) { double("comm") }

before do
allow(machine).to receive(:communicate).and_return(communicator)
allow(communicator).to receive(:execute).and_return(true)
end

describe "#ansible_install" do

it_behaves_like "Ansible setup via pip on Debian-based systems"

describe "when install_mode is :default (or unknown)" do
it "installs ansible with apt package manager" do
install_backports_if_wheezy_release = <<INLINE_CRIPT
CODENAME=`lsb_release -cs`
if [ x$CODENAME == 'xwheezy' ]; then
echo 'deb http://http.debian.net/debian wheezy-backports main' > /etc/apt/sources.list.d/wheezy-backports.list
fi
INLINE_CRIPT

expect(communicator).to receive(:sudo).once.ordered.with(install_backports_if_wheezy_release)
expect(communicator).to receive(:sudo).once.ordered.with("apt-get update -y -qq")
expect(communicator).to receive(:sudo).once.ordered.with("DEBIAN_FRONTEND=noninteractive apt-get install -y -qq --option \"Dpkg::Options::=--force-confold\" ansible")

subject.ansible_install(machine, :default, "", "", "")
end
end
end

end
@@ -1,41 +0,0 @@
require_relative "../../../../../../base"
require_relative "../shared/pip_ansible_install_examples"


require Vagrant.source_root.join("plugins/provisioners/ansible/cap/guest/freebsd/ansible_install")


describe VagrantPlugins::Ansible::Cap::Guest::FreeBSD::AnsibleInstall do
include_context "unit"

subject { VagrantPlugins::Ansible::Cap::Guest::FreeBSD::AnsibleInstall }

let(:iso_env) do
# We have to create a Vagrantfile so there is a root path
env = isolated_environment
env.vagrantfile("")
env.create_vagrant_env
end

let(:machine) { iso_env.machine(iso_env.machine_names[0], :dummy) }
let(:communicator) { double("comm") }

before do
allow(machine).to receive(:communicate).and_return(communicator)
allow(communicator).to receive(:execute).and_return(true)
end

describe "#ansible_install" do

it_behaves_like "Ansible setup via pip is not implemented"

describe "when install_mode is :default (or unknown)" do
it "installs ansible with 'pkg' package manager" do
expect(communicator).to receive(:sudo).with("pkg install -qy py36-ansible")

subject.ansible_install(machine, :default, "", "", "")
end
end
end

end
@@ -23,39 +23,30 @@ describe VagrantPlugins::Ansible::Cap::Guest::Pip do
end

describe "#get_pip" do
describe "when no pip_install_cmd argument is provided" do
describe 'when no pip_install_command argument is provided' do
it "installs pip using the default command" do
expect(communicator).to receive(:execute).
with("curl https://bootstrap.pypa.io/get-pip.py | sudo python")

expect(communicator).to receive(:execute).with("curl https://bootstrap.pypa.io/get-pip.py | sudo python")
subject.get_pip(machine)
end
end

describe "when pip_install_cmd argument is provided" do
describe 'when pip_install_command argument is provided' do
it "runs the supplied argument instead of default" do
pip_install_cmd = "foo"

expect(communicator).to receive(:execute).with(pip_install_cmd)

subject.get_pip(machine, pip_install_cmd)
pip_install_command = "foo"
expect(communicator).to receive(:execute).with(pip_install_command)
subject.get_pip(machine,pip_install_command)
end

it "installs pip using the default command if the argument is empty" do
pip_install_cmd = ""

expect(communicator).to receive(:execute).
with("curl https://bootstrap.pypa.io/get-pip.py | sudo python")

subject.get_pip(machine, pip_install_cmd)
end

pip_install_command = ""
expect(communicator).to receive(:execute).with("curl https://bootstrap.pypa.io/get-pip.py | sudo python")
subject.get_pip(machine,pip_install_command)
end

it "installs pip using the default command if the argument is nil" do
expect(communicator).to receive(:execute).
with("curl https://bootstrap.pypa.io/get-pip.py | sudo python")

expect(communicator).to receive(:execute).with("curl https://bootstrap.pypa.io/get-pip.py | sudo python")
subject.get_pip(machine, nil)
end
end
end
end
end
end
@@ -1,68 +0,0 @@

shared_examples_for "Ansible setup via pip" do

describe "when install_mode is :pip" do
it "installs pip and calls Cap::Guest::Pip::pip_install" do
expect(communicator).to receive(:sudo).at_least(1).times.ordered
expect(VagrantPlugins::Ansible::Cap::Guest::Pip).to receive(:pip_install).once.ordered.
with(machine, "ansible", anything, anything, true)

subject.ansible_install(machine, :pip, "", "", "")
end
end

describe "when install_mode is :pip_args_only" do
it "installs pip and calls Cap::Guest::Pip::pip_install with 'pip_args' parameter" do
pip_args = "-r /vagrant/requirements.txt"

expect(communicator).to receive(:sudo).at_least(1).times.ordered
expect(VagrantPlugins::Ansible::Cap::Guest::Pip).to receive(:pip_install).with(machine, "", "", pip_args, false).ordered

subject.ansible_install(machine, :pip_args_only, "", pip_args, "")
end
end

end

shared_examples_for "Ansible setup via pip on Debian-based systems" do

describe "installs required Debian packages and..." do
pip_install_cmd = "foo"

it "calls Cap::Guest::Pip::get_pip with 'pip' install_mode" do
expect(communicator).to receive(:sudo).once.ordered.
with("apt-get update -y -qq")
expect(communicator).to receive(:sudo).once.ordered.
with("DEBIAN_FRONTEND=noninteractive apt-get install -y -qq --option \"Dpkg::Options::=--force-confold\" build-essential curl git libssl-dev libffi-dev python-dev")
expect(communicator).to receive(:sudo).once.ordered.
with("pip install --upgrade ansible")

subject.ansible_install(machine, :pip, "", "", pip_install_cmd)
end

it "calls Cap::Guest::Pip::get_pip with 'pip_args_only' install_mode" do
expect(communicator).to receive(:sudo).once.ordered.
with("apt-get update -y -qq")
expect(communicator).to receive(:sudo).once.ordered.
with("DEBIAN_FRONTEND=noninteractive apt-get install -y -qq --option \"Dpkg::Options::=--force-confold\" build-essential curl git libssl-dev libffi-dev python-dev")
expect(communicator).to receive(:sudo).once.ordered.
with("pip install")

subject.ansible_install(machine, :pip_args_only, "", "", pip_install_cmd)
end

end

it_behaves_like "Ansible setup via pip"

end

shared_examples_for "Ansible setup via pip is not implemented" do

describe "when install_mode is different from :default" do
it "raises an AnsiblePipInstallIsNotSupported error" do
expect { subject.ansible_install(machine, :ansible_the_hardway, "", "", "") }.to raise_error(VagrantPlugins::Ansible::Errors::AnsiblePipInstallIsNotSupported)
end
end

end
@@ -1,41 +0,0 @@
require_relative "../../../../../../base"
require_relative "../shared/pip_ansible_install_examples"


require Vagrant.source_root.join("plugins/provisioners/ansible/cap/guest/suse/ansible_install")


describe VagrantPlugins::Ansible::Cap::Guest::SUSE::AnsibleInstall do
include_context "unit"

subject { VagrantPlugins::Ansible::Cap::Guest::SUSE::AnsibleInstall }

let(:iso_env) do
# We have to create a Vagrantfile so there is a root path
env = isolated_environment
env.vagrantfile("")
env.create_vagrant_env
end

let(:machine) { iso_env.machine(iso_env.machine_names[0], :dummy) }
let(:communicator) { double("comm") }

before do
allow(machine).to receive(:communicate).and_return(communicator)
allow(communicator).to receive(:execute).and_return(true)
end

describe "#ansible_install" do

it_behaves_like "Ansible setup via pip is not implemented"

describe "when install_mode is :default (or unknown)" do
it "installs ansible with 'zypper' package manager" do
expect(communicator).to receive(:sudo).with("zypper --non-interactive --quiet install ansible")

subject.ansible_install(machine, :default, "", "", "")
end
end
end

end
@@ -1,76 +0,0 @@
require_relative "../../../../../../base"
require_relative "../shared/pip_ansible_install_examples"


require Vagrant.source_root.join("plugins/provisioners/ansible/cap/guest/ubuntu/ansible_install")


describe VagrantPlugins::Ansible::Cap::Guest::Ubuntu::AnsibleInstall do
include_context "unit"

subject { VagrantPlugins::Ansible::Cap::Guest::Ubuntu::AnsibleInstall }

let(:iso_env) do
# We have to create a Vagrantfile so there is a root path
env = isolated_environment
env.vagrantfile("")
env.create_vagrant_env
end

let(:machine) { iso_env.machine(iso_env.machine_names[0], :dummy) }
let(:communicator) { double("comm") }

before do
allow(machine).to receive(:communicate).and_return(communicator)
allow(communicator).to receive(:execute).and_return(true)
end

describe "#ansible_install" do

it_behaves_like "Ansible setup via pip on Debian-based systems"

describe "when install_mode is :default (or unknown)" do
describe "#ansible_apt_install" do
describe "installs ansible from ansible/ansible PPA repository" do

check_if_add_apt_repository_is_present="test -x \"$(which add-apt-repository)\""

it "first installs 'software-properties-common' package if add-apt-repository is not already present" do
allow(communicator).to receive(:test).
with(check_if_add_apt_repository_is_present).and_return(false)

expect(communicator).to receive(:sudo).once.ordered.
with("""
apt-get update -y -qq && \
DEBIAN_FRONTEND=noninteractive apt-get install -y -qq software-properties-common --option \"Dpkg::Options::=--force-confold\"
""")
expect(communicator).to receive(:sudo).once.ordered.
with("""
add-apt-repository ppa:ansible/ansible -y && \
apt-get update -y -qq && \
DEBIAN_FRONTEND=noninteractive apt-get install -y -qq ansible --option \"Dpkg::Options::=--force-confold\"
""")

subject.ansible_install(machine, :default, "", "", "")
end

it "adds 'ppa:ansible/ansible' and install 'ansible' package" do
allow(communicator).to receive(:test).
with(check_if_add_apt_repository_is_present).and_return(true)

expect(communicator).to receive(:sudo).
with("""
add-apt-repository ppa:ansible/ansible -y && \
apt-get update -y -qq && \
DEBIAN_FRONTEND=noninteractive apt-get install -y -qq ansible --option \"Dpkg::Options::=--force-confold\"
""")

subject.ansible_install(machine, :default, "", "", "")
end

end
end
end
end

end
@@ -1,68 +0,0 @@
require_relative "../../base"

require "vagrant/util/template_renderer"

describe "templates/nfs/exports_darwin" do
let(:template) { "nfs/exports_darwin" }
let(:user) { "501" }
let(:uuid) { "UUID" }
let(:opts) { {:bsd__compiled_nfs_options => "-alldirs -mapall=501:80"} }
let(:ips) { ["172.16.0.2"] }

it "renders the template" do
result = Vagrant::Util::TemplateRenderer.render(template, {
user: user,
uuid: uuid,
folders: []
})
expect(result).to eq <<-EOH.gsub(/^ {6}/, "")
# VAGRANT-BEGIN: 501 UUID
# VAGRANT-END: 501 UUID
EOH
end

context "one nfs mount" do
let(:folders) {
{
["/vagrant"] => opts
}
}

it "renders the template" do
result = Vagrant::Util::TemplateRenderer.render(template, {
user: user,
uuid: uuid,
folders: folders,
ips: ips
})
expect(result).to eq <<-EOH.gsub(/^ {8}/, "")
# VAGRANT-BEGIN: 501 UUID
/vagrant -alldirs -mapall=501:80 172.16.0.2
# VAGRANT-END: 501 UUID
EOH
end
end

context "subdirectory that should also be exported" do
let(:folders) {
{
["/vagrant", "/vagrant/other"] => opts
}
}

it "puts each directory on its own line" do
result = Vagrant::Util::TemplateRenderer.render(template, {
user: user,
uuid: uuid,
folders: folders,
ips: ips
})
expect(result).to eq <<-EOH.gsub(/^ {8}/, "")
# VAGRANT-BEGIN: 501 UUID
/vagrant -alldirs -mapall=501:80 172.16.0.2
/vagrant/other -alldirs -mapall=501:80 172.16.0.2
# VAGRANT-END: 501 UUID
EOH
end
end
end
@@ -209,29 +209,6 @@ describe Vagrant::Action::Builtin::BoxAdd, :skip_windows, :bsdtar do
to raise_error(Vagrant::Errors::BoxChecksumMismatch)
end

it "ignores checksums if empty string" do
box_path = iso_env.box2_file(:virtualbox)
with_web_server(box_path) do |port|
env[:box_name] = "foo"
env[:box_url] = "http://127.0.0.1:#{port}/#{box_path.basename}"
env[:box_checksum] = ""
env[:box_checksum_type] = ""


expect(box_collection).to receive(:add).with(any_args) { |path, name, version, **opts|
expect(checksum(path)).to eq(checksum(box_path))
expect(name).to eq("foo")
expect(version).to eq("0")
expect(opts[:metadata_url]).to be_nil
true
}.and_return(box)

expect(app).to receive(:call).with(env)

subject.call(env)
end
end

it "does not raise an error if the checksum has different case" do
box_path = iso_env.box2_file(:virtualbox)
@@ -54,21 +54,11 @@ describe Vagrant::Action::Builtin::BoxCheckOutdated do
env[:box_outdated_force] = true

expect(app).to receive(:call).with(env).once
expect(box).to receive(:has_update?)

subject.call(env)

expect(env).to have_key(:box_outdated)
end

it "checks if not forced" do
machine.config.vm.box_check_update = false
env[:box_outdated_force] = false

expect(app).to receive(:call).with(env).once

subject.call(env)
end
end

context "no box" do
@@ -151,41 +141,6 @@ describe Vagrant::Action::Builtin::BoxCheckOutdated do
expect(env[:box_outdated]).to be(true)
end

context "both local and remote update exist" do
it "should prompt user to update" do
iso_env.box3("foo", "1.1", :virtualbox)

md = Vagrant::BoxMetadata.new(StringIO.new(<<-RAW))
{
"name": "foo",
"versions": [
{
"version": "1.2",
"providers": [
{
"name": "virtualbox",
"url": "bar"
}
]
}
]
}
RAW

expect(box).to receive(:has_update?).with(machine.config.vm.box_version,
{download_options:
{automatic_check: true, ca_cert: nil, ca_path: nil, client_cert: nil, insecure: false}}).
and_return([md, md.version("1.2"), md.version("1.2").provider("virtualbox")])

allow(I18n).to receive(:t) { :ok }
expect(I18n).to receive(:t).with(/box_outdated_single/, hash_including(latest: "1.2")).once

expect(app).to receive(:call).with(env).once

subject.call(env)
end
end

it "does not have a local update if not within constraints" do
iso_env.box3("foo", "1.1", :virtualbox)
@@ -272,21 +227,4 @@ describe Vagrant::Action::Builtin::BoxCheckOutdated do
end
end
end

describe ".check_outdated_local" do
let(:updated_box) do
box_dir = iso_env.box3("foo", "1.1", :virtualbox)
Vagrant::Box.new("foo", :virtualbox, "1.1", box_dir).tap do |b|
allow(b).to receive(:has_update?).and_return(nil)
end
end

it "should return the updated box if it is already installed" do
expect(env[:box_collection]).to receive(:find).with("foo", :virtualbox, "> 1.0").and_return(updated_box)

local_update = subject.check_outdated_local(env)

expect(local_update).to eq(updated_box)
end
end
end
@@ -1,50 +0,0 @@
require File.expand_path("../../../../base", __FILE__)

describe Vagrant::Action::Builtin::Disk do
let(:app) { lambda { |env| } }
let(:vm) { double("vm") }
let(:config) { double("config", vm: vm) }
let(:provider) { double("provider") }
let(:machine) { double("machine", config: config, provider: provider, provider_name: "provider") }
let(:env) { { ui: ui, machine: machine} }

let(:disks) { [double("disk")] }

let(:ui) { double("ui") }

describe "#call" do
it "calls configure_disks if disk config present" do
allow(vm).to receive(:disks).and_return(disks)
allow(machine).to receive(:disks).and_return(disks)
allow(machine.provider).to receive(:capability?).with(:configure_disks).and_return(true)
subject = described_class.new(app, env)

expect(app).to receive(:call).with(env).ordered
expect(machine.provider).to receive(:capability).with(:configure_disks, disks)

subject.call(env)
end

it "continues on if no disk config present" do
allow(vm).to receive(:disks).and_return([])
subject = described_class.new(app, env)

expect(app).to receive(:call).with(env).ordered
expect(machine.provider).not_to receive(:capability).with(:configure_disks, disks)

subject.call(env)
end

it "prints a warning if disk config capability is unsupported" do
allow(vm).to receive(:disks).and_return(disks)
allow(machine.provider).to receive(:capability?).with(:configure_disks).and_return(false)
subject = described_class.new(app, env)

expect(app).to receive(:call).with(env).ordered
expect(machine.provider).not_to receive(:capability).with(:configure_disks, disks)
expect(ui).to receive(:warn)

subject.call(env)
end
end
end
@@ -75,8 +75,7 @@ describe Vagrant::Action::Builtin::HandleForwardedPortCollisions do
end

it "should check if host port is in use" do
expect(instance).to receive(:is_forwarded_already).and_return(false)
expect(instance).to receive(:is_port_open?).and_return(false)
expect(instance).to receive(:is_forwarded_already).and_return false
instance.call(env)
end

@@ -149,7 +148,7 @@ describe Vagrant::Action::Builtin::HandleForwardedPortCollisions do
let(:host_port){ 8080 }

it "should check if the port is open" do
expect(instance).to receive(:is_port_open?).with(host_ip, host_port).and_return(true)
expect(instance).to receive(:is_port_open?).with(host_ip, host_port).and_return true
instance.send(:port_check, host_ip, host_port)
end

@@ -157,13 +156,13 @@ describe Vagrant::Action::Builtin::HandleForwardedPortCollisions do
let(:host_ip){ nil }

it "should set host_ip to 0.0.0.0 when unset" do
expect(instance).to receive(:is_port_open?).with("0.0.0.0", host_port).and_return(true)
expect(instance).to receive(:is_port_open?).with("0.0.0.0", host_port).and_return true
instance.send(:port_check, host_ip, host_port)
end

it "should set host_ip to 127.0.0.1 when 0.0.0.0 is not available" do
expect(instance).to receive(:is_port_open?).with("0.0.0.0", host_port).and_raise(Errno::EADDRNOTAVAIL)
expect(instance).to receive(:is_port_open?).with("127.0.0.1", host_port).and_return(true)
expect(instance).to receive(:is_port_open?).with("0.0.0.0", host_port).and_raise Errno::EADDRNOTAVAIL
expect(instance).to receive(:is_port_open?).with("127.0.0.1", host_port).and_return true
instance.send(:port_check, host_ip, host_port)
end
end
@@ -13,7 +13,7 @@ describe Vagrant::Action::Builtin::MixinProvisioners do
sandbox.create_vagrant_env
end

let(:provisioner_config){ double("provisioner_config", name: nil) }
let(:provisioner_config){ {} }
let(:provisioner_one) do
prov = VagrantPlugins::Kernel_V2::VagrantConfigProvisioner.new("spec-test", :shell)
prov.config = provisioner_config

@@ -24,14 +24,8 @@ describe Vagrant::Action::Builtin::MixinProvisioners do
prov.config = provisioner_config
prov
end
let(:provisioner_three) do
prov = VagrantPlugins::Kernel_V2::VagrantConfigProvisioner.new(nil, :shell)
provisioner_config = double("provisioner_config", name: "my_shell")
prov.config = provisioner_config
prov
end

let(:provisioner_instances) { [provisioner_one,provisioner_two,provisioner_three] }
let(:provisioner_instances) { [provisioner_one,provisioner_two] }

let(:ui) { double("ui") }
let(:vm) { double("vm", provisioners: provisioner_instances) }
@@ -60,17 +54,6 @@ describe Vagrant::Action::Builtin::MixinProvisioners do
shell_two = result[1]
expect(shell_two.first).to be_a(VagrantPlugins::Shell::Provisioner)
end

it "returns all the instances of configured provisioners" do
result = subject.provisioner_instances(env)
expect(result.size).to eq(provisioner_instances.size)
shell_one = result.first
expect(shell_one[1][:name]).to eq(:"spec-test")
shell_two = result[1]
expect(shell_two[1][:name]).to eq(:"spec-test")
shell_three = result[2]
expect(shell_three[1][:name]).to eq(:"my_shell")
end
end

context "#sort_provisioner_instances" do

@@ -108,9 +91,9 @@ describe Vagrant::Action::Builtin::MixinProvisioners do

it "returns the array in the correct order" do
result = subject.provisioner_instances(env)
expect(result[0].last[:name]).to eq(:"before-test")
expect(result[1].last[:name]).to eq(:"root-test")
expect(result[2].last[:name]).to eq(:"after-test")
expect(result[0].last[:name]).to eq("before-test")
expect(result[1].last[:name]).to eq("root-test")
expect(result[2].last[:name]).to eq("after-test")
end
end

@@ -138,10 +121,10 @@ describe Vagrant::Action::Builtin::MixinProvisioners do
it "puts the each shortcut provisioners in place" do
result = subject.provisioner_instances(env)

expect(result[0].last[:name]).to eq(:"before-test")
expect(result[1].last[:name]).to eq(:"root-test")
expect(result[2].last[:name]).to eq(:"before-test")
expect(result[3].last[:name]).to eq(:"root2-test")
expect(result[0].last[:name]).to eq("before-test")
expect(result[1].last[:name]).to eq("root-test")
expect(result[2].last[:name]).to eq("before-test")
expect(result[3].last[:name]).to eq("root2-test")
end
end

@@ -169,10 +152,10 @@ describe Vagrant::Action::Builtin::MixinProvisioners do
it "puts the each shortcut provisioners in place" do
result = subject.provisioner_instances(env)

expect(result[0].last[:name]).to eq(:"root-test")
expect(result[1].last[:name]).to eq(:"after-test")
expect(result[2].last[:name]).to eq(:"root2-test")
expect(result[3].last[:name]).to eq(:"after-test")
expect(result[0].last[:name]).to eq("root-test")
expect(result[1].last[:name]).to eq("after-test")
expect(result[2].last[:name]).to eq("root2-test")
expect(result[3].last[:name]).to eq("after-test")
end
end

@@ -206,12 +189,12 @@ describe Vagrant::Action::Builtin::MixinProvisioners do
it "puts the each shortcut provisioners in place" do
result = subject.provisioner_instances(env)

expect(result[0].last[:name]).to eq(:"before-test")
expect(result[1].last[:name]).to eq(:"root-test")
expect(result[2].last[:name]).to eq(:"after-test")
expect(result[3].last[:name]).to eq(:"before-test")
expect(result[4].last[:name]).to eq(:"root2-test")
expect(result[5].last[:name]).to eq(:"after-test")
expect(result[0].last[:name]).to eq("before-test")
expect(result[1].last[:name]).to eq("root-test")
expect(result[2].last[:name]).to eq("after-test")
expect(result[3].last[:name]).to eq("before-test")
expect(result[4].last[:name]).to eq("root2-test")
expect(result[5].last[:name]).to eq("after-test")
end
end

@@ -245,10 +228,10 @@ describe Vagrant::Action::Builtin::MixinProvisioners do
it "puts the each shortcut provisioners in place" do
result = subject.provisioner_instances(env)

expect(result[0].last[:name]).to eq(:"before-test")
expect(result[1].last[:name]).to eq(:"root-test")
expect(result[2].last[:name]).to eq(:"root2-test")
expect(result[3].last[:name]).to eq(:"after-test")
expect(result[0].last[:name]).to eq("before-test")
expect(result[1].last[:name]).to eq("root-test")
expect(result[2].last[:name]).to eq("root2-test")
expect(result[3].last[:name]).to eq("after-test")
end
end
end
@@ -71,7 +71,7 @@ describe Vagrant::Action::Builtin::Provision do
prov.config = provisioner_config
prov
end
let(:provisioner_config){ double("provisioner_config", name: "spec-test") }
let(:provisioner_config){ {} }

before{ expect(vm_config).to receive(:provisioners).and_return([provisioner]) }

@@ -107,13 +107,13 @@ describe Vagrant::Action::Builtin::Provision do
end

it "should not run if provision types are set and provisioner is not included" do
env[:provision_types] = [:"other-provisioner", :"other-test"]
env[:provision_types] = ["other-provisioner", "other-test"]
expect(hook).not_to receive(:call).with(:provisioner_run, anything)
instance.call(env)
end

it "should run if provision types are set and include provisioner name" do
env[:provision_types] = [:"spec-test"]
env[:provision_types] = ["spec-test"]
expect(hook).to receive(:call).with(:provisioner_run, anything)
instance.call(env)
end

@@ -142,13 +142,13 @@ describe Vagrant::Action::Builtin::Provision do
end

it "should not run if provision types are set and provisioner is not included" do
env[:provision_types] = [:"other-provisioner", :"other-test"]
env[:provision_types] = ["other-provisioner", "other-test"]
expect(hook).not_to receive(:call).with(:provisioner_run, anything)
instance.call(env)
end

it "should run if provision types are set and include provisioner name" do
env[:provision_types] = [:"spec-test"]
env[:provision_types] = ["spec-test"]
expect(hook).to receive(:call).with(:provisioner_run, anything)
instance.call(env)
end

@@ -177,13 +177,13 @@ describe Vagrant::Action::Builtin::Provision do
end

it "should not run if provision types are set and provisioner is not included" do
env[:provision_types] = [:"other-provisioner", :"other-test"]
env[:provision_types] = ["other-provisioner", "other-test"]
expect(hook).not_to receive(:call).with(:provisioner_run, anything)
instance.call(env)
end

it "should run if provision types are set and include provisioner name" do
env[:provision_types] = [:"spec-test"]
env[:provision_types] = ["spec-test"]
expect(hook).to receive(:call).with(:provisioner_run, anything)
instance.call(env)
end

@@ -192,7 +192,7 @@ describe Vagrant::Action::Builtin::Provision do
File.open(File.join(data_dir.to_s, "action_provision"), "w") do |file|
file.write("1.5:machine-id")
end
env[:provision_types] = [:"spec-test"]
env[:provision_types] = ["spec-test"]
expect(hook).to receive(:call).with(:provisioner_run, anything)
instance.call(env)
end
@@ -18,14 +18,8 @@ describe Vagrant::Action::Builtin::SSHRun do
)
end

let(:vm) do
double("vm",
communicator: nil
)
end

# Configuration mock
let(:config) { double("config", ssh: ssh, vm: vm) }
let(:config) { double("config", ssh: ssh) }

let(:machine) do
double("machine",

@@ -86,63 +80,4 @@ describe Vagrant::Action::Builtin::SSHRun do
env[:ssh_run_command] = "echo test"
described_class.new(app, env).call(env)
end

context "when using the WinSSH communicator" do
let(:winssh) { double("winssh", shell: "foo") }

before do
expect(vm).to receive(:communicator).and_return(:winssh)
expect(config).to receive(:winssh).and_return(winssh)
env[:tty] = nil
end

it "should use the WinSSH shell for running ssh commands" do
ssh_info = { foo: :bar }
opts = {:extra_args=>["foo -c 'dir'"], :subprocess=>true}

expect(ssh_klass).to receive(:exec).
with(ssh_info, opts)

env[:ssh_info] = ssh_info
env[:ssh_run_command] = "dir"
described_class.new(app, env).call(env)
end

context "when shell is cmd" do
before do
expect(winssh).to receive(:shell).and_return('cmd')
end

it "should use appropriate options for cmd" do
ssh_info = { foo: :bar }
opts = {:extra_args=>["cmd /C dir "], :subprocess=>true}

expect(ssh_klass).to receive(:exec).
with(ssh_info, opts)

env[:ssh_info] = ssh_info
env[:ssh_run_command] = "dir"
described_class.new(app, env).call(env)
end
end

context "when shell is powershell" do
before do
expect(winssh).to receive(:shell).and_return('powershell')
end

it "should base64 encode the command" do
ssh_info = { foo: :bar }
encoded_command = "JABQAHIAbwBnAHIAZQBzAHMAUAByAGUAZgBlAHIAZQBuAGMAZQAgAD0AIAAiAFMAaQBsAGUAbgB0AGwAeQBDAG8AbgB0AGkAbgB1AGUAIgA7ACAAZABpAHIA"
opts = {:extra_args=>["powershell -encodedCommand #{encoded_command}"], :subprocess=>true}

expect(ssh_klass).to receive(:exec).
with(ssh_info, opts)

env[:ssh_info] = ssh_info
env[:ssh_run_command] = "dir"
described_class.new(app, env).call(env)
end
end
end
end
@@ -1,48 +1,6 @@
require File.expand_path("../../../base", __FILE__)

describe Vagrant::Action::Warden do
class ActionOne
def initialize(app, env)
@app = app
end

def call(env)
@app.call(env)
end

def recover(env)
env[:recover] << 1
end
end

class ActionTwo
def initialize(app, env)
@app = app
end

def call(env)
@app.call(env)
end

def recover(env)
env[:recover] << 2
end
end

class ExitAction
def initialize(app, env)
@app = app
end

def call(env)
@app.call(env)
end

def recover(env)
env[:recover] = true
end
end

let(:data) { { data: [] } }
let(:instance) { described_class.new }

@@ -60,10 +18,38 @@ describe Vagrant::Action::Warden do
end

it "starts a recovery sequence when an exception is raised" do
class Action
def initialize(app, env)
@app = app
end

def call(env)
@app.call(env)
end

def recover(env)
env[:recover] << 1
end
end

class ActionTwo
def initialize(app, env)
@app = app
end

def call(env)
@app.call(env)
end

def recover(env)
env[:recover] << 2
end
end

error_proc = Proc.new { raise "ERROR!" }

data = { recover: [] }
instance = described_class.new([ActionOne, ActionTwo, error_proc], data)
instance = described_class.new([Action, ActionTwo, error_proc], data)

# The error should be raised back up
expect { instance.call(data) }.

@@ -77,11 +63,25 @@ describe Vagrant::Action::Warden do
end

it "does not do a recovery sequence if SystemExit is raised" do
class Action
def initialize(app, env)
@app = app
end

def call(env)
@app.call(env)
end

def recover(env)
env[:recover] = true
end
end

# Make a proc that just calls "abort" which raises a
# SystemExit exception.
error_proc = Proc.new { abort }

instance = described_class.new([ExitAction, error_proc], data)
instance = described_class.new([Action, error_proc], data)

# The SystemExit should come through
expect { instance.call(data) }.to raise_error(SystemExit)

@@ -89,36 +89,4 @@ describe Vagrant::Action::Warden do
# The recover should not have been called
expect(data.key?(:recover)).not_to be
end

context "when hook is defined" do
let(:hook) { double("hook") }

before do
data[:hook] = hook
allow(hook).to receive(:call)
end

it "should receive a before hook call" do
expect(hook).to receive(:call).with(:before_action_one)
described_class.new([ActionOne], data).call(data)
end

it "should receive an after hook call" do
expect(hook).to receive(:call).with(:after_action_one)
described_class.new([ActionOne], data).call(data)
end

it "should not receive any hook calls for proc instances" do
expect(hook).not_to receive(:call)
described_class.new([proc{|*_| :testing }], data).call(data)
end

it "should receive before and after calls for each class" do
expect(hook).to receive(:call).with(:before_action_one)
expect(hook).to receive(:call).with(:after_action_one)
expect(hook).to receive(:call).with(:before_action_two)
expect(hook).to receive(:call).with(:after_action_two)
described_class.new([ActionOne, proc{|*_| :testing }, ActionTwo], data).call(data)
end
end
end
@@ -115,11 +115,6 @@ describe Vagrant::BoxMetadata do
expect(subject.versions).to eq(
["1.0.0", "1.1.0", "1.1.5"])
end

it "filters versions by matching provider" do
expect(subject.versions(provider: :vmware)).to eq(
["1.0.0", "1.1.0"])
end
end
end
@@ -406,17 +406,4 @@ describe Vagrant::UI::Prefixed do
subject.output("foo", target: "bar")
end
end

describe "#format_message" do
it "should return the same number of new lines as given" do
["no new line", "one\nnew line", "two\nnew lines\n", "three\nnew lines\n\n"].each do |msg|
expect(subject.format_message(:detail, msg).count("\n")).to eq(msg.count("\n"))
end
end

it "should properly format a blank message" do
expect(subject.format_message(:detail, "", target: "default", prefix: true)).
to match(/\s+default:\s+/)
end
end
end
@@ -32,15 +32,4 @@ describe FileChecksum do
expect(t_i.checksum).to eq(k_i.checksum)
end
end

context "with an invalid digest" do
let(:fake_digest) { :fake_digest }

it "should raise an exception if the box has an invalid checksum type" do
file = environment.workdir.join("file")
file.open("w+") { |f| f.write("HELLO!") }

expect{ described_class.new(file, fake_digest) }.to raise_error(Vagrant::Errors::BoxChecksumInvalidType)
end
end
end
@@ -49,15 +49,5 @@ describe Vagrant::Util::IsPortOpen do
# best, really.
expect(klass.is_port_open?("127.0.0.1", closed_port)).not_to be
end

it "should handle connection refused" do
expect(TCPSocket).to receive(:new).with("0.0.0.0", closed_port).and_raise(Errno::ECONNREFUSED)
expect(klass.is_port_open?("0.0.0.0", closed_port)).to be(false)
end

it "should raise an error if cannot assign requested address" do
expect(TCPSocket).to receive(:new).with("0.0.0.0", open_port).and_raise(Errno::EADDRNOTAVAIL)
expect { klass.is_port_open?("0.0.0.0", open_port) }.to raise_error(Errno::EADDRNOTAVAIL)
end
end
Some files were not shown because too many files have changed in this diff.