Compare commits
308 Commits
build-vagr
...
master
Author | SHA1 | Date |
---|---|---|
Brian Cain | 5cc06bde64 | |
comet | fa43afb57b | |
Chris Roberts | b231abe114 | |
Chris Roberts | ac268e2638 | |
Jeff Bonhag | db316a396f | |
Jeff Bonhag | a1abc177bd | |
林博仁(Buo-ren Lin) | 8ac6403bcc | |
Chris Roberts | 02c157e427 | |
Chris Roberts | d8d24f4d6e | |
Chris Roberts | ee302f3a9b | |
Brian Cain | 054f4def5c | |
Brian Cain | 0077d7955d | |
Brian Cain | f886184011 | |
Tenzin Chemi | e864605879 | |
Brian Cain | 7f09202571 | |
Brian Cain | 2ae6b37c9a | |
Renee Margaret McConahy | 1c6ac924c5 | |
Brian Cain | 6855dd8c74 | |
Brian Cain | dd5d73e8a9 | |
Brian Cain | 3d1f1d2422 | |
Brian Cain | ee8b38d47a | |
Brian Cain | b4c302a74c | |
Brian Cain | 711270b90a | |
Jeff Bonhag | 58687e6c44 | |
Jeff Bonhag | 7fb7dd8608 | |
Jeff Bonhag | f0b8c0737f | |
Jeff Bonhag | 88d2077cb8 | |
Jeff Bonhag | ade076cabb | |
Chris Roberts | 85065c3746 | |
Chris Roberts | 923ecc0943 | |
Chris Roberts | 25659a6f6b | |
Chris Roberts | 80c05460ab | |
Brian Cain | 8c137e2bba | |
Brian Cain | 4e58dfed2b | |
Brian Cain | 61ee42976b | |
Brian Cain | 7d667c9dea | |
Jose Luis Duran | 582f46231b | |
Brian Cain | 0c0352201d | |
Gilles Cornu | bcdda99dfa | |
Gilles Cornu | 069122f42c | |
Gilles Cornu | a2b87eace4 | |
Gilles Cornu | 113a0a7aaa | |
Stefan Koenen | 04aeff4cc6 | |
Brian Cain | 11c667a81d | |
Brian Cain | aecd354c57 | |
Brian Cain | 9f2261a6fc | |
Brian Cain | de61b307aa | |
Jeff Bonhag | 320b166c31 | |
Jeff Bonhag | d7a5f74897 | |
Jeff Bonhag | 4059758e00 | |
Jeff Bonhag | 2fa44f5b8f | |
Brian Cain | 55994824a4 | |
Jeff Bonhag | df9df5c250 | |
Brian Cain | 0b71991902 | |
Brian Cain | 8dae01e9c7 | |
Brian Cain | 20ccf46fb1 | |
Brian Cain | 4cdf2cdd8f | |
JJ Asghar | 9adf855f31 | |
Brian Cain | fb4e6985e1 | |
Brian Cain | f306131a97 | |
Brian Cain | 1e0385443b | |
Brian Cain | bcf4d5a210 | |
Per Lundberg | 527dda241f | |
Jeff Bonhag | b03f8f5aac | |
Jeff Bonhag | c6ee1049aa | |
Brian Cain | 9fc155bf75 | |
Brian Cain | f979d40436 | |
Brian Cain | f8449063b6 | |
Brian Cain | b56dede627 | |
Brian Cain | 8ad810b5b6 | |
Brian Cain | f55aca091c | |
Brian Cain | 86f9243762 | |
Brian Cain | 3e177d380f | |
Brian Cain | 734aad1ede | |
Brian Cain | b5b59a4eee | |
Brian Cain | ea7a230cb6 | |
Brian Cain | 57fd731fbf | |
Brian Cain | 3a2b4ddef2 | |
Brian Cain | cd98a8bf64 | |
Brian Cain | 2e324a4971 | |
Brian Cain | 271cf8a603 | |
Brian Cain | 8031ebe9d1 | |
Brian Cain | 17f8fbe65e | |
Brian Cain | 63b35ad909 | |
Brian Cain | b0f4d43663 | |
Brian Cain | 7feee7a87f | |
Brian Cain | f51805e910 | |
Brian Cain | 9190f4b2e6 | |
Brian Cain | 97db5d5da3 | |
Brian Cain | 1a02c52852 | |
Brian Cain | 54c3e28a45 | |
Brian Cain | a457dee8b0 | |
Brian Cain | a51e9b1fa1 | |
Brian Cain | 83fea21ff1 | |
Brian Cain | e598007237 | |
Brian Cain | 6c54bf6ad9 | |
Brian Cain | d54e870752 | |
Brian Cain | f01c90e676 | |
Brian Cain | 71ad0f7aba | |
Brian Cain | 35f113e759 | |
Brian Cain | 9c1d05113f | |
Brian Cain | 87366cf4f3 | |
Brian Cain | 93828508ec | |
Brian Cain | d6df83103e | |
Brian Cain | 499e39dd10 | |
Brian Cain | cef1bd47b3 | |
Brian Cain | a55e3d2b91 | |
Brian Cain | 8adffc830a | |
Brian Cain | c18f36e516 | |
Brian Cain | ad73969010 | |
Brian Cain | 7cfccb5cfd | |
Brian Cain | ee751ca6e1 | |
Brian Cain | 995c4bbc60 | |
Brian Cain | a18ce4f732 | |
Brian Cain | 393ce9eb1b | |
Brian Cain | 28d339eac5 | |
Brian Cain | ee388d8293 | |
Brian Cain | 34673fe5f9 | |
Brian Cain | 98a2d0f723 | |
Brian Cain | abcc334900 | |
Brian Cain | 1a21782bd5 | |
Brian Cain | e361900d3a | |
Brian Cain | aa5a3ef7f7 | |
Chris Roberts | 1d9533113c | |
Brian Cain | e535a8a624 | |
Brian Cain | e100d9cd48 | |
Brian Cain | 2901dae948 | |
Brian Cain | cb661886a2 | |
Brian Cain | c35861e937 | |
Brian Cain | 2656d3d9d5 | |
Brian Cain | 6831129720 | |
Brian Cain | f5886e6044 | |
Brian Cain | 601118059c | |
Brian Cain | 3ee7b3801f | |
Brian Cain | 2a2e2d5414 | |
Brian Cain | fb7a9d5582 | |
Tomas Krizek | 3519f82b24 | |
Brian Cain | efd3a62ffe | |
Brian Cain | 1699821571 | |
Brian Cain | 4d70856b8a | |
Juha Ruotsalainen | 4fc8b07974 | |
Rumpu-Jussi | f3629ebd09 | |
Juha Ruotsalainen | 8041d0ae78 | |
Brian Cain | 237af1b6aa | |
Brian Cain | ff5fc2a8f2 | |
Antonio Terceiro | 4fd2373753 | |
Brian Cain | 51ceda7e25 | |
Jeff Bonhag | a37226d63d | |
Brian Cain | a8a9f54fab | |
Jeff Bonhag | 307123c1f2 | |
Brian Cain | 28812fb880 | |
Brian Cain | 2adda6861e | |
Brian Cain | ed9769586b | |
Brian Cain | 06799402ed | |
Jeff Bonhag | c1a6910539 | |
Jeff Bonhag | 0e68f02dc9 | |
Chris Roberts | 85e34039b9 | |
Stefan Sundin | 48c893ee18 | |
Brian Cain | 2ea9323d0b | |
Brian Cain | 9e7fae0c51 | |
Brian Cain | 403fece5ac | |
Brian Cain | 8b21051466 | |
Brian Cain | 39ea1af5ab | |
Brian Cain | a8b99f422b | |
Chris Roberts | 063dea5699 | |
Jeff Bonhag | 0015a8c4c6 | |
Jeff Bonhag | a39a7ae794 | |
Chris Roberts | 687c601755 | |
Chris Roberts | 2eaa6e9be1 | |
Brian Cain | 474a2e3279 | |
Brian Cain | ce20b70073 | |
Brian Cain | 823c499dfb | |
Brian Cain | 361736579c | |
Brian Cain | 509812b8e6 | |
Brian Cain | 1ec5e4c396 | |
RJ Spiker | 26b6f61682 | |
Brian Cain | 145f04893c | |
Brian Cain | 44c6f655be | |
Brian Cain | 72496d5f5d | |
Jeff Bonhag | 7560c7fdef | |
Jeff Bonhag | 8a69c1205c | |
Chris Roberts | 89e0522659 | |
Brian Cain | e2adefe4c7 | |
Brian Cain | d4f83ca4a3 | |
RJ Spiker | 3d48faa1b4 | |
Brian Cain | 0aaa2cc147 | |
Brian Cain | f30b136c57 | |
Sam Mesterton-Gibbons | e4ab40393e | |
Dan Čermák | fe4743a22b | |
Brian Cain | d27d7d5a2a | |
Brian Cain | 4b1297b5d3 | |
Brian Cain | 811231bf92 | |
Christoph Moench-Tegeder | 6f6e58f4ae | |
Brian Cain | d70e3eb828 | |
Slav Donchev | 42482849ec | |
RJ Spiker | 0f7b250e81 | |
Slav Donchev | 8130314741 | |
Brian Cain | 4735699bc8 | |
Brian Cain | ce033454ca | |
Brian Cain | f998e535ce | |
Brian Cain | f6503462f6 | |
Jonathan Forscher | 8def8a794f | |
Chris Roberts | 318dca294a | |
Chris Roberts | 783f5fc65d | |
Chris Roberts | d2914a85ee | |
Chris Roberts | 5aeda42902 | |
Chris Roberts | f58b97b0fe | |
Brian Cain | 9fa3301a0c | |
Michael C | 670cadae9f | |
Brian Cain | 0f9a5bacb3 | |
Brian Cain | 33a53d0c4e | |
Brian Cain | 7c93151f67 | |
Dan Čermák | 435a32684f | |
Chris Roberts | 67270a2d64 | |
Chris Roberts | 81b23f3180 | |
Chris Roberts | b12a23273e | |
Brian Cain | a069b48952 | |
Brian Cain | 2147c6544f | |
Brian Cain | e6d47329ee | |
Brian Cain | 8458a21657 | |
Brian Cain | 62b7e35169 | |
Chris Roberts | da15c1d171 | |
Chris Roberts | 8f42dbff21 | |
Chris Roberts | 2c16c96e57 | |
Brian Cain | d22cfcb86c | |
Brian Cain | 00e0dc9279 | |
Andy Fowler | e288a5b732 | |
Andy Fowler | ae9c3e28d6 | |
Chris Roberts | 72043a8a79 | |
Chris Roberts | 0d2751686b | |
Chris Roberts | 3d026e84f2 | |
Chris Roberts | 04a1bb58f3 | |
Chris Roberts | 0bce1e6307 | |
Chris Roberts | cc23905142 | |
Chris Roberts | 13654dcc37 | |
Chris Roberts | 9ee5ce4817 | |
Chris Roberts | 7fb81bcea1 | |
Brian Cain | eec20f9007 | |
Brian Cain | ea550289a9 | |
Brian Cain | 292496b9e0 | |
Brian Cain | 7616c99c79 | |
Brian Cain | f9a75514d2 | |
Brian Cain | d6f556c83d | |
Chris Roberts | af405b03c9 | |
Brian Cain | f1ea4eaac0 | |
Brian Cain | ec963966cd | |
Brian Cain | 96e275451c | |
Chris Roberts | 5916b111a1 | |
Chris Roberts | fafde5efdc | |
Anders Kaseorg | 4afd370d6a | |
Brian Cain | c35758046d | |
Brian Cain | 656b61175d | |
Gregor Dschung | 8442b6b59f | |
Brian Cain | 0a6c4e2d0f | |
Brian Cain | 07a5190676 | |
Brian Cain | 620500c1b0 | |
Brian Cain | 968a126405 | |
Brian Cain | 03ad01f158 | |
Brian Cain | 23352d406c | |
Brian Cain | c49a58f0d3 | |
Brian Cain | f92e1a1973 | |
Brian Cain | 64cb304153 | |
Brian Cain | 96c20ad3cc | |
Brian Cain | 66ec57a637 | |
Brian Cain | 1b98f0681c | |
Brian Cain | 8f856949f2 | |
Rui Lopes | ba71c30e04 | |
Brian Cain | 5c26e651e6 | |
Brian Cain | f69bdc4bb6 | |
Brian Cain | 1c620852b6 | |
Brian Cain | b3462d805b | |
Brian Cain | 71bf5de44c | |
Brian Cain | 0d0db48552 | |
Brian Cain | c9998f4a27 | |
Chris Roberts | 916ea9dd75 | |
Brian Cain | 1d6a248a0f | |
Ladar Levison | 0712c18d68 | |
Chris Roberts | 058577ec46 | |
Ricardo Aielo | 365111f3bb | |
Dan Čermák | a3588c28a8 | |
Jose Luis Duran | 6c4b537c82 | |
Brian Cain | 9eac6ae62d | |
Brian Cain | 8b4ff9c40f | |
Brian Cain | 2581efa778 | |
Brian Cain | bab66df318 | |
Brian Cain | c7b1f8821d | |
Brian Cain | d4002aa74d | |
Brian Cain | e820b5df8e | |
Brian Cain | f14cf09af6 | |
Brian Cain | 21db2db9ac | |
Brian Cain | ebe1f3f1c9 | |
Anders Kaseorg | 5b4dcf9443 | |
Brian Cain | ace67ccdeb | |
Brian Cain | a22acba467 | |
Karsten Gresch | fc600f1e9e | |
Brian Cain | 09a37e0767 | |
Brian Cain | ccf99d8c0c | |
Nick Fagerlund | ba3aa81841 | |
Ricardo Aielo | ae3ae5078f | |
Ricardo Aielo | 990bc107fa | |
Ricardo Aielo | 546cd90cea | |
Ricardo Aielo | 068e348826 | |
Ricardo Aielo | 7167c9a2fe | |
Ricardo Aielo | 5dfbd20aa3 | |
Dan Čermák | f056231deb | |
Matt Shanahan | 6caa7bfa86 | |
Olle Jonsson | 55b364f87b | |
Yaniv Kaul | 68b3ca5a8b |
|
@ -0,0 +1,20 @@
|
|||
#!/usr/bin/env bash

# Build the Vagrant RubyGem from the repository root and store the
# artifact in the private asset bucket as the rolling "master" build.
# Relies on helpers (wrap, fail) and variables (output, repository,
# ASSETS_PRIVATE_BUCKET) provided by .ci/init.sh -> .ci/common.sh.

# Resolve the real path of this script (following symlinks) to locate
# the repository root one directory up.
csource="${BASH_SOURCE[0]}"
while [ -h "$csource" ] ; do csource="$(readlink "$csource")"; done
root="$( cd -P "$( dirname "$csource" )/../" && pwd )"

. "${root}/.ci/init.sh"

pushd "${root}" > "${output}"

# Build our gem
wrap gem build *.gemspec \
     "Failed to build Vagrant RubyGem"

# Get the path of our new gem (first glob match)
g=(vagrant*.gem)
gem="${g[0]}"
# Guard: with no match the glob stays a literal pattern, which would
# previously have been passed straight to `aws s3 cp` and failed there
if [ ! -f "${gem}" ]; then
    fail "Expected Vagrant RubyGem was not found after build"
fi

wrap aws s3 cp "${gem}" "${ASSETS_PRIVATE_BUCKET}/${repository}/vagrant-master.gem" \
     "Failed to store Vagrant RubyGem master build"
|
|
@ -0,0 +1,432 @@
|
|||
#!/usr/bin/env bash
# last-modified: Tue Jan 14 20:37:58 UTC 2020
# Fix: the shebang must be the very first bytes of the file for the
# kernel to honor it; the original placed the last-modified comment
# above it.
|
||||
|
||||
# Path to file used for output redirect
# and extracting messages for warning and
# failure information sent to slack
function output_file() {
    echo -n "/tmp/.ci-output"
}
|
||||
|
||||
# Write failure message, send error to configured
# slack, and exit with non-zero status. If an
# "$(output_file)" file exists, the last 5 lines will be
# included in the slack message.
#
# $1: Failure message
function fail() {
    (>&2 echo "ERROR: ${1}")
    # Fix: original tested `-f ""$(output_file)""` — the doubled empty
    # quotes were harmless but sloppy; a single quoted expansion is the
    # correct idiom.
    if [ -f "$(output_file)" ]; then
        slack -s error -m "ERROR: ${1}" -f "$(output_file)" -T 5
    else
        slack -s error -m "ERROR: ${1}"
    fi
    exit 1
}
|
||||
|
||||
# Write warning message, send warning to configured
# slack
#
# $1: Warning message
function warn() {
    (>&2 echo "WARN: ${1}")
    # Fix: original tested `-f ""$(output_file)""` — doubled empty
    # quotes removed for the correct single-quoted idiom.
    if [ -f "$(output_file)" ]; then
        slack -s warn -m "WARNING: ${1}" -f "$(output_file)"
    else
        slack -s warn -m "WARNING: ${1}"
    fi
}
|
||||
|
||||
# Execute command while redirecting all output to
# a file (file is used within fail message when the
# command is unsuccessful). Final argument is the
# error message used when the command fails.
#
# $@{1:$#-1}: Command to execute
# $@{$#}: Failure message
function wrap() {
    # Fix: `i` was assigned without `local` and leaked into the
    # caller's scope, clobbering any global `i`.
    local i=$((${#} - 1))
    wrap_raw "${@:1:$i}"
    if [ $? -ne 0 ]; then
        # Surface captured output before failing so logs show the cause
        cat "$(output_file)"
        fail "${@:$#}"
    fi
    rm "$(output_file)"
}
|
||||
|
||||
# Execute command while redirecting all output to
# a file. Exit status is returned.
function wrap_raw() {
    rm -f "$(output_file)"
    # A function returns the status of its last command, so no
    # explicit `return $?` is needed here.
    "$@" > "$(output_file)" 2>&1
}
|
||||
|
||||
# Execute command while redirecting all output to
# a file (file is used within fail message when the
# command is unsuccessful). Command output will be
# streamed during execution. Final argument is the
# error message used when the command fails.
#
# $@{1:$#-1}: Command to execute
# $@{$#}: Failure message
function wrap_stream() {
    # Fix: `i` was assigned without `local` and leaked into the
    # caller's scope.
    local i=$((${#} - 1))
    wrap_stream_raw "${@:1:$i}"
    if [ $? -ne 0 ]; then
        # Output was already streamed live, so no `cat` here (unlike wrap)
        fail "${@:$#}"
    fi
    rm "$(output_file)"
}
|
||||
|
||||
# Execute command while redirecting all output
# to a file. Command output will be streamed
# during execution. Exit status is returned
function wrap_stream_raw() {
    # Fix: `pid` was assigned without `local` and leaked into the
    # caller's scope.
    local pid
    rm -f "$(output_file)"
    "${@}" > "$(output_file)" 2>&1 &
    pid=$!
    # Wait for the redirect target to exist before tailing it
    until [ -f "$(output_file)" ]; do
        sleep 0.1
    done
    # NOTE(review): --quiet/--pid are GNU tail options — assumes a
    # GNU userland (true for the Linux CI runners this targets)
    tail -f --quiet --pid "${pid}" "$(output_file)"
    wait "${pid}"
    return $?
}
|
||||
|
||||
|
||||
# Run a command on the packet device via packet-exec,
# wrapped with failure handling.
# $@{1:$#-1}: Command to execute
# $@{$#}: Failure message
function pkt_wrap() {
    wrap packet-exec run -quiet -- "$@"
}

# Run a command on the packet device via packet-exec,
# returning the raw exit status.
# $@: Command to execute
function pkt_wrap_raw() {
    wrap_raw packet-exec run -quiet -- "$@"
}

# Run a command on the packet device via packet-exec,
# wrapped with failure handling and live output streaming.
# $@{1:$#-1}: Command to execute
# $@{$#}: Failure message
function pkt_wrap_stream() {
    wrap_stream packet-exec run -quiet -- "$@"
}

# Run a command on the packet device via packet-exec,
# streaming output and returning the raw exit status.
# $@: Command to execute
function pkt_wrap_stream_raw() {
    wrap_stream_raw packet-exec run -quiet -- "$@"
}
|
||||
|
||||
# Generates location within the asset storage
# bucket to retain built assets.
#
# Untagged builds go to long-term storage keyed by ref and short SHA.
# Tagged builds with build metadata ("+") also go to long-term storage;
# plain release tags go to the primary private bucket.
function asset_location() {
    if [ "${tag}" = "" ]; then
        dst="${ASSETS_PRIVATE_LONGTERM}/${repository}/${ident_ref}/${short_sha}"
    elif [[ "${tag}" = *"+"* ]]; then
        dst="${ASSETS_PRIVATE_LONGTERM}/${repository}/${tag}"
    else
        dst="${ASSETS_PRIVATE_BUCKET}/${repository}/${tag}"
    fi
    echo -n "${dst}"
}
|
||||
|
||||
# Upload assets to the asset storage bucket.
#
# $1: Path to asset file or directory to upload
function upload_assets() {
    if [ -z "${1}" ]; then
        fail "Parameter required for asset upload"
    fi
    # Directories are copied recursively; single files directly
    if [ -d "${1}" ]; then
        wrap aws s3 cp --recursive "${1}" "$(asset_location)/" \
             "Upload to asset storage failed"
    else
        wrap aws s3 cp "${1}" "$(asset_location)/" \
             "Upload to asset storage failed"
    fi
}
|
||||
|
||||
# Download assets from the asset storage bucket. If
# destination is not provided, remote path will be
# used locally.
#
# $1: Path to asset or directory to download
# $2: Optional destination for downloaded assets
function download_assets() {
    if [ "${1}" = "" ]; then
        fail "At least one parameter required for asset download"
    fi
    if [ "${2}" = "" ]; then
        dst="${1#/}"
    else
        dst="${2}"
    fi
    src="$(asset_location)/${1#/}"
    remote=$(aws s3 ls "${src}")
    # " PRE " in `aws s3 ls` output means the path is a prefix (directory)
    if [[ "${remote}" = *" PRE "* ]]; then
        mkdir -p "${dst}"
        wrap aws s3 cp --recursive "${src%/}/" "${dst}" \
             "Download from asset storage failed"
    else
        # Fix: the original unconditionally ran `mkdir -p "${dst}"`
        # before this branch, turning a single-file destination into a
        # directory so `aws s3 cp` dropped the file *inside* it instead
        # of writing it at the requested path. Only the parent directory
        # is created here.
        mkdir -p "$(dirname "${dst}")"
        wrap aws s3 cp "${src}" "${dst}" \
             "Download from asset storage failed"
    fi
}
|
||||
|
||||
# Upload assets to the cache storage bucket.
#
# $1: Path to asset file or directory to upload
function upload_cache() {
    if [ -z "${1}" ]; then
        fail "Parameter required for cache upload"
    fi
    # Directories are copied recursively; single files directly
    if [ -d "${1}" ]; then
        wrap aws s3 cp --recursive "${1}" "${asset_cache}/" \
             "Upload to cache failed"
    else
        wrap aws s3 cp "${1}" "${asset_cache}/" \
             "Upload to cache failed"
    fi
}
|
||||
|
||||
# Download assets from the cache storage bucket. If
# destination is not provided, remote path will be
# used locally.
#
# $1: Path to asset or directory to download
# $2: Optional destination for downloaded assets
function download_cache() {
    if [ "${1}" = "" ]; then
        fail "At least one parameter required for cache download"
    fi
    if [ "${2}" = "" ]; then
        dst="${1#/}"
    else
        dst="${2}"
    fi
    src="${asset_cache}/${1#/}"
    remote=$(aws s3 ls "${src}")
    # " PRE " in `aws s3 ls` output means the path is a prefix (directory)
    if [[ "${remote}" = *" PRE "* ]]; then
        mkdir -p "${dst}"
        wrap aws s3 cp --recursive "${src%/}/" "${dst}" \
             "Download from cache storage failed"
    else
        # Fix: the original unconditionally ran `mkdir -p "${dst}"`
        # before this branch, turning a single-file destination into a
        # directory so `aws s3 cp` dropped the file *inside* it instead
        # of writing it at the requested path. Only the parent directory
        # is created here.
        mkdir -p "$(dirname "${dst}")"
        wrap aws s3 cp "${src}" "${dst}" \
             "Download from cache storage failed"
    fi
}
|
||||
|
||||
# Validate arguments for GitHub release. Checks for
# two arguments and that second argument is an existing
# file asset, or directory.
#
# $1: GitHub tag name
# $2: Asset file or directory of assets
function release_validate() {
    if [ -z "${1}" ]; then
        fail "Missing required position 1 argument (TAG) for release"
    fi
    if [ -z "${2}" ]; then
        fail "Missing required position 2 argument (PATH) for release"
    fi
    if [ ! -e "${2}" ]; then
        fail "Path provided for release (${2}) does not exist"
    fi
}
|
||||
|
||||
# Generate a GitHub release
#
# $1: GitHub tag name
# $2: Asset file or directory of assets
function release() {
    release_validate "${@}"
    # Fix: the first attempt omitted the TAG and PATH positional
    # arguments (compare prerelease below), so it could never succeed
    # and always fell through to the retry.
    wrap_raw ghr -u "${repo_owner}" -r "${repo_name}" -c "${full_sha}" -n "${1}" \
             -delete "${1}" "${2}"
    if [ $? -ne 0 ]; then
        # Retry without -delete, reporting failure through slack
        wrap ghr -u "${repo_owner}" -r "${repo_name}" -c "${full_sha}" -n "${1}" \
             "${1}" "${2}" "Failed to create release for version ${1}"
    fi
}
|
||||
|
||||
# Generate a GitHub prerelease
#
# $1: GitHub tag name
# $2: Asset file or directory of assets
function prerelease() {
    release_validate "${@}"
    # Ensure the prerelease tag carries build metadata (short SHA)
    if [[ "${1}" == *"+"* ]]; then
        ptag="${1}"
    else
        ptag="${1}+${short_sha}"
    fi

    wrap_raw ghr -u "${repo_owner}" -r "${repo_name}" -c "${full_sha}" -n "${ptag}" \
             -delete -prerelease "${ptag}" "${2}"
    if [ $? -ne 0 ]; then
        # Retry without -delete, reporting failure through slack
        wrap ghr -u "${repo_owner}" -r "${repo_name}" -c "${full_sha}" -n "${ptag}" \
             -prerelease "${ptag}" "${2}" \
             "Failed to create prerelease for version ${1}"
    fi
    echo -n "${ptag}"
}
|
||||
|
||||
# Check if version string is valid for release
# (strict vMAJOR.MINOR.PATCH form)
#
# $1: Version
# Returns: 0 if valid, 1 if invalid
function valid_release_version() {
    # [[ ]] itself yields 0 on match and 1 otherwise, so its status
    # is the function's return value.
    [[ "${1}" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]
}
|
||||
|
||||
# Validate arguments for HashiCorp release. Ensures asset
# directory exists, and checks that the SHASUMS and SHASUM.sig
# files are present.
#
# $1: Asset directory
function hashicorp_release_validate() {
    directory="${1}"

    # Directory checks
    if [ -z "${directory}" ]; then
        fail "No asset directory was provided for HashiCorp release"
    fi
    if [ ! -d "${directory}" ]; then
        fail "Asset directory for HashiCorp release does not exist"
    fi

    # SHASUMS checks — an unmatched glob stays a literal pattern,
    # which -e correctly reports as missing
    if [ ! -e "${directory}/"*SHASUMS ]; then
        fail "Asset directory is missing SHASUMS file"
    fi
    if [ ! -e "${directory}/"*SHASUMS.sig ]; then
        fail "Asset directory is missing SHASUMS signature file"
    fi
}
|
||||
|
||||
# Verify release assets by validating checksum properly match
# and that signature file is valid
#
# $1: Asset directory
function hashicorp_release_verify() {
    directory="${1}"
    pushd "${directory}" > "${output}"

    # First do a checksum validation
    wrap shasum -a 256 -c *_SHA256SUMS \
         "Checksum validation of release assets failed"
    # Next check that the signature is valid using a throwaway keyring
    gpghome=$(mktemp -qd)
    export GNUPGHOME="${gpghome}"
    wrap gpg --import "${HASHICORP_PUBLIC_GPG_KEY}" \
         "Failed to import HashiCorp public GPG key"
    wrap gpg --verify *SHA256SUMS.sig *SHA256SUMS \
         "Validation of SHA256SUMS signature failed"
    rm -rf "${gpghome}" > "${output}" 2>&1
    # Fix: original left GNUPGHOME exported and pointing at the
    # just-deleted temp directory, which would break any later gpg
    # invocation in this process.
    unset GNUPGHOME
    popd > "${output}"
}
|
||||
|
||||
# Generate a HashiCorp release
#
# $1: Asset directory
function hashicorp_release() {
    directory="${1}"

    hashicorp_release_validate "${directory}"
    hashicorp_release_verify "${directory}"

    # Swap in the release credentials, remembering the originals
    oid="${AWS_ACCESS_KEY_ID}"
    okey="${AWS_SECRET_ACCESS_KEY}"
    export AWS_ACCESS_KEY_ID="${RELEASE_AWS_ACCESS_KEY_ID}"
    export AWS_SECRET_ACCESS_KEY="${RELEASE_AWS_SECRET_ACCESS_KEY}"

    wrap_stream hc-releases upload "${directory}" \
                "Failed to upload HashiCorp release assets"
    wrap_stream hc-releases publish \
                "Failed to publish HashiCorp release"

    # Restore the original credentials
    export AWS_ACCESS_KEY_ID="${oid}"
    export AWS_SECRET_ACCESS_KEY="${okey}"
}
|
||||
|
||||
# Configures the local git identity and remote so pushes
# are performed as hashibot with its token.
function hashibot_git() {
    wrap git config user.name "${HASHIBOT_USERNAME}" \
         "Failed to setup git for hashibot usage (username)"
    wrap git config user.email "${HASHIBOT_EMAIL}" \
         "Failed to setup git for hashibot usage (email)"
    # Token-authenticated HTTPS remote for pushes
    wrap git remote set-url origin "https://${HASHIBOT_USERNAME}:${HASHIBOT_TOKEN}@github.com/${repository}" \
         "Failed to setup git for hashibot usage (remote)"
}
|
||||
|
||||
# Stub cleanup method which can be redefined
# within actual script
function cleanup() {
    (>&2 echo "** No cleanup tasks defined")
}

# Always run cleanup (or a script-provided override) on exit
trap cleanup EXIT
|
||||
|
||||
# Enable debugging. This needs to be enabled with
# extreme caution when used on public repositories.
# Output with debugging enabled will likely include
# secret values which should not be publicly exposed.
#
# If repository is public, FORCE_PUBLIC_DEBUG environment
# variable must also be set.

is_private=$(curl -H "Authorization: token ${HASHIBOT_TOKEN}" -s "https://api.github.com/repos/${GITHUB_REPOSITORY}" | jq .private)

if [ "${DEBUG}" != "" ]; then
    # Public repositories refuse debug unless explicitly forced
    if [ "${is_private}" = "false" ] && [ "${FORCE_PUBLIC_DEBUG}" = "" ]; then
        fail "Cannot enable debug mode on public repository unless forced"
    fi
    set -x
    output="/dev/stdout"
else
    output="/dev/null"
fi

# Check if we are running a public repository on private runners
if [ "${VAGRANT_PRIVATE}" != "" ] && [ "${is_private}" = "false" ]; then
    fail "Cannot run public repositories on private Vagrant runners. Disable runners now!"
fi

# Common variables
full_sha="${GITHUB_SHA}"
short_sha="${full_sha:0:8}"
ident_ref="${GITHUB_REF#*/*/}"
if [[ "${GITHUB_REF}" == *"refs/tags/"* ]]; then
    tag="${GITHUB_REF##*tags/}"
    if valid_release_version "${tag}"; then
        release=1
    fi
fi
repository="${GITHUB_REPOSITORY}"
repo_owner="${repository%/*}"
repo_name="${repository#*/}"
asset_cache="${ASSETS_PRIVATE_SHORTTERM}/${repository}/${GITHUB_ACTION}"
job_id="${GITHUB_ACTION}"
|
|
@ -0,0 +1,6 @@
|
|||
#!/usr/bin/env bash

# Shared initialization sourced by CI scripts: loads the common
# helper library and prepares the environment.
. "${root}/.ci/common.sh"

# Keep apt from prompting, and expose bundled CI tools on PATH
export DEBIAN_FRONTEND="noninteractive"
export PATH="${PATH}:${root}/.ci"
|
|
@ -0,0 +1,62 @@
|
|||
#!/usr/bin/env bash

# Build the Vagrant RubyGem and publish it as a (pre)release on the
# vagrant-builders repository, which triggers a full package build.

ghr_version="0.13.0"

# NOTE: This release will generate a new release on the installers
# repository which in turn triggers a full package build
target_owner="hashicorp"
target_repository="vagrant-builders"

# Resolve the real path of this script (following symlinks) to locate
# the repository root one directory up.
csource="${BASH_SOURCE[0]}"
while [ -h "$csource" ] ; do csource="$(readlink "$csource")"; done
root="$( cd -P "$( dirname "$csource" )/../" && pwd )"

. "${root}/.ci/init.sh"

pushd "${root}" > "${output}"

# Install ghr
wrap curl -Lso /tmp/ghr.tgz "https://github.com/tcnksm/ghr/releases/download/v${ghr_version}/ghr_v${ghr_version}_linux_amd64.tar.gz" \
     "Failed to download ghr utility"
wrap tar -C /tmp/ -xf /tmp/ghr.tgz \
     "Failed to unpack ghr archive"
wrap mv "/tmp/ghr_v${ghr_version}_linux_amd64/ghr" "${root}/.ci/" \
     "Failed to install ghr utility"

# Build our gem
wrap gem build *.gemspec \
     "Failed to build Vagrant RubyGem"

# Get the path of our new gem (first glob match)
g=(vagrant*.gem)
gem="${g[0]}"
# Guard: with no match the glob stays a literal pattern
if [ ! -f "${gem}" ]; then
    fail "Expected Vagrant RubyGem was not found after build"
fi

# Determine the version of the release
vagrant_version="$(gem specification "${gem}" version)"
vagrant_version="${vagrant_version##*version: }"

# We want to release into the builders repository so
# update the repository variable with the desired destination
repo_owner="${target_owner}"
repo_name="${target_repository}"
full_sha="master"

export GITHUB_TOKEN="${HASHIBOT_TOKEN}"

if [ "${tag}" = "" ]; then
    echo "Generating Vagrant RubyGem pre-release... "
    version="v${vagrant_version}+${short_sha}"
    prerelease "${version}" "${gem}"
else
    # Validate this is a proper release version
    if ! valid_release_version "${vagrant_version}"; then
        fail "Invalid version format for Vagrant release: ${vagrant_version}"
    fi

    echo "Generating Vagrant RubyGem release... "
    version="v${vagrant_version}"
    release "${version}" "${gem}"
fi

slack -m "New Vagrant installers release triggered: *${version}*"
|
|
@ -0,0 +1,176 @@
|
|||
#!/usr/bin/env ruby
# Send a notification to Slack via an incoming webhook.
#
# Every option may be supplied as a CLI flag or via a SLACK_*
# environment variable (e.g. SLACK_WEBHOOK, SLACK_CHANNEL).
# Exits non-zero on invalid input or a failed webhook POST.

require "optparse"
require "net/https"
require "uri"
require "json"

# Option names eligible for SLACK_* environment backfill
OPTIONS = [:channel, :username, :icon, :state, :message,
  :message_file, :file, :title, :tail, :webhook].freeze

options = {}

OptionParser.new do |opts|
  opts.banner = "Usage: #{File.basename(__FILE__)} [options]"

  opts.on("-c", "--channel CHAN", "Send to channel") do |c|
    options[:channel] = c
  end

  opts.on("-u", "--username USER", "Send as username") do |u|
    options[:username] = u
  end

  opts.on("-i", "--icon URL", "User icon image") do |i|
    options[:icon] = i
  end

  opts.on("-s", "--state STATE", "Message state (success, warn, error, or color code)") do |s|
    options[:state] = s
  end

  opts.on("-m", "--message MESSAGE", "Message to send") do |m|
    options[:message] = m
  end

  opts.on("-M", "--message-file MESSAGE_FILE", "Use file contents as message") do |m|
    options[:message_file] = m
  end

  opts.on("-f", "--file MESSAGE_FILE", "Send raw contents of file in message") do |f|
    options[:file] = f
  end

  opts.on("-t", "--title TITLE", "Message title") do |t|
    options[:title] = t
  end

  opts.on("-T", "--tail N", "Send last N lines of content from raw message file") do |t|
    options[:tail] = t
  end

  opts.on("-w", "--webhook HOOK", "Slack webhook") do |w|
    options[:webhook] = w
  end

  opts.on("-h", "--help", "Print help") do
    puts opts
    exit
  end
end.parse!

# Backfill any option not given on the CLI from its SLACK_* variable
OPTIONS.each do |key|
  next if options.key?(key)
  env_key = "SLACK_#{key.to_s.upcase}"
  options[key] = ENV[env_key] if ENV[env_key]
end

if !options[:webhook]
  $stderr.puts "ERROR: Webhook is required!"
  exit 1
end

# CI-specific defaults for icon/username plus a footer linking the build
if ENV["CIRCLECI"]
  options[:icon] = "https://emoji.slack-edge.com/TF1GCKJNM/circleci/054b58d488e65138.png" unless options[:icon]
  options[:username] = "circleci" unless options[:username]
  options[:footer] = "CircleCI - <#{ENV["CIRCLE_BUILD_URL"]}|#{ENV["CIRCLE_PROJECT_USERNAME"]}/#{ENV["CIRCLE_PROJECT_REPONAME"]}>"
  options[:footer_icon] = "https://emoji.slack-edge.com/TF1GCKJNM/circleci/054b58d488e65138.png"
end

if ENV["GITHUB_ACTIONS"]
  options[:icon] = "https://ca.slack-edge.com/T024UT03C-WG8NDATGT-f82ae03b9fca-48" unless options[:icon]
  options[:username] = "github" unless options[:username]
  options[:footer] = "Actions - <https://github.com/#{ENV["GITHUB_REPOSITORY"]}/commit/#{ENV["GITHUB_SHA"]}/checks|#{ENV["GITHUB_REPOSITORY"]}>"
  options[:footer_icon] = "https://ca.slack-edge.com/T024UT03C-WG8NDATGT-f82ae03b9fca-48"
end

options[:state] = "success" unless options[:state]

# Normalize state names to Slack attachment colors; "#rrggbb" codes
# pass through unchanged, anything else is rejected.
case options[:state]
when "success", "good"
  options[:state] = "good"
when "warn", "warning"
  options[:state] = "warning"
when "error", "danger"
  options[:state] = "danger"
else
  if !options[:state].start_with?("#")
    $stderr.puts "ERROR: Invalid value for `state` (#{options[:state]})"
    exit 1
  end
end

msg = options[:message]

# NOTE: Message provided from CLI argument will end up with
# double escaped newlines so remove one
msg.gsub!("\\n", "\n") if msg

if options[:message_file]
  if !File.exist?(options[:message_file])
    $stderr.puts "ERROR: Message file does not exist `#{options[:message_file]}`"
    exit 1
  end
  msg_c = File.read(options[:message_file])
  msg = msg ? "#{msg}\n\n#{msg_c}" : msg_c
end

if options[:file]
  if !File.exist?(options[:file])
    $stderr.puts "ERROR: Message file does not exist `#{options[:file]}`"
    exit 1
  end
  if (tail = options[:tail].to_i) > 0
    # Read growing chunks from the end of the file until at least
    # `tail` lines (or the whole file) have been captured
    content = ""
    buffer = 0
    File.open(options[:file], "r") do |f|
      until (content.split("\n").size > tail) || buffer >= f.size
        buffer += 1000
        buffer = f.size if buffer > f.size
        f.seek(f.size - buffer)
        content = f.read
      end
    end
    parts = content.split("\n")
    if parts.size > tail
      parts = parts.slice(-tail, tail)
    end
    fmsg = parts ? parts.join("\n") : ""
  else
    fmsg = File.read(options[:file])
  end
  fmsg = "```\n#{fmsg}\n```"
  # Fix: the original only appended the file content when a message was
  # already present (`msg = msg << ... if msg`), so a file-only
  # invocation discarded the content and failed the empty-message check
  # below.
  msg = msg ? (msg << "\n\n" << fmsg) : fmsg
end

if msg.to_s.empty?
  $stderr.puts "ERROR: No message content provided!"
  exit 1
end

# Build the legacy attachment payload.
# Fix: the original set `mrkdn: true`, which is not a field Slack
# recognizes (silently ignored); `mrkdwn_in` is the documented
# attachment field that enables markdown rendering for the listed
# attachment fields.
attach = {text: msg, fallback: msg, color: options[:state], mrkdwn_in: ["text", "fallback"]}
attach[:title] = options[:title] if options[:title]
attach[:footer] = options[:footer] if options[:footer]
attach[:footer_icon] = options[:footer_icon] if options[:footer_icon]
attach[:ts] = Time.now.to_i

payload = {}.tap do |pd|
  pd[:username] = options.fetch(:username, "packet-exec")
  pd[:channel] = options[:channel] if options[:channel]
  pd[:icon_url] = options[:icon] if options[:icon]
  pd[:attachments] = [attach]
end

result = Net::HTTP.post(URI(options[:webhook]), payload.to_json, "Content-Type" => "application/json")

if !result.code.start_with?("2")
  $stderr.puts "Failed to send slack message"
  exit 1
else
  $stdout.puts "ok"
end
|
|
@ -0,0 +1,27 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
csource="${BASH_SOURCE[0]}"
|
||||
while [ -h "$csource" ] ; do csource="$(readlink "$csource")"; done
|
||||
root="$( cd -P "$( dirname "$csource" )/../" && pwd )"
|
||||
|
||||
pushd "${root}" > /dev/null
|
||||
|
||||
export DEBIAN_FRONTEND="noninteractive"
|
||||
|
||||
# Install required dependencies
|
||||
sudo apt-get update || exit 1
|
||||
sudo apt-get install -yq bsdtar || exit 1
|
||||
|
||||
# Ensure bundler is installed
|
||||
gem install --no-document bundler || exit 1
|
||||
|
||||
# Install the bundle
|
||||
bundle install || exit 1
|
||||
|
||||
# Run tests
|
||||
bundle exec rake test:unit
|
||||
|
||||
result=$?
|
||||
popd > /dev/null
|
||||
|
||||
exit $result
|
|
@ -1,133 +1,6 @@
|
|||
version: 2
|
||||
reference:
|
||||
environment: &ENVIRONMENT
|
||||
SLACK_TITLE: Vagrant CI
|
||||
RELEASE_TARGET_REPONAME: vagrant-installers
|
||||
images:
|
||||
ruby23: &ruby23
|
||||
docker:
|
||||
- image: circleci/ruby:2.3
|
||||
ruby24: &ruby24
|
||||
docker:
|
||||
- image: circleci/ruby:2.4
|
||||
ruby25: &ruby25
|
||||
docker:
|
||||
- image: circleci/ruby:2.5
|
||||
ruby26: &ruby26
|
||||
docker:
|
||||
- image: circleci/ruby:2.6
|
||||
builder: &builder
|
||||
environment:
|
||||
<<: *ENVIRONMENT
|
||||
docker:
|
||||
- image: $BUILDER_IMAGE
|
||||
auth:
|
||||
username: $BUILDER_USERNAME
|
||||
password: $BUILDER_PASSWORD
|
||||
workflows:
|
||||
public: &PUBLIC_WORKFLOW
|
||||
filters:
|
||||
branches:
|
||||
only: /^pull\/.*/
|
||||
master: &MASTER_WORKFLOW
|
||||
filters:
|
||||
branches:
|
||||
only: master
|
||||
private_build: &PRIVATE_WORKFLOW_BUILD
|
||||
context: vagrant
|
||||
filters:
|
||||
branches:
|
||||
only:
|
||||
- /^build-.*/
|
||||
tags:
|
||||
only: /.*/
|
||||
jobs:
|
||||
private_failure: &PRIVATE_FAILURE
|
||||
run:
|
||||
name: Failure handler
|
||||
command: |
|
||||
if [ -f .output ]; then
|
||||
slack -m "Vagrant job has failed: *${CIRCLE_JOB}*" -s error -f .output -T 5
|
||||
else
|
||||
slack -m "Vagrant job has failed: *${CIRCLE_JOB}*" -s error
|
||||
fi
|
||||
when: on_fail
|
||||
unit_tests: &unit_tests
|
||||
steps:
|
||||
- run: sudo apt-get update ; sudo apt-get -yq install bsdtar
|
||||
- checkout
|
||||
- restore_cache:
|
||||
key: static-site-gems-v1-{{ checksum "Gemfile.lock" }}
|
||||
- run:
|
||||
command: bundle check || bundle install --path vendor/bundle
|
||||
- save_cache:
|
||||
key: static-site-gems-v1-{{ checksum "Gemfile.lock" }}
|
||||
paths:
|
||||
- ./vendor/bundle
|
||||
- run: bundle exec rake test:unit
|
||||
|
||||
jobs:
|
||||
build:
|
||||
<<: *builder
|
||||
steps:
|
||||
- checkout
|
||||
- run: gem build vagrant.gemspec
|
||||
- *PRIVATE_FAILURE
|
||||
- persist_to_workspace:
|
||||
root: .
|
||||
paths:
|
||||
- ./*.gem
|
||||
store:
|
||||
<<: *builder
|
||||
steps:
|
||||
- attach_workspace:
|
||||
at: .
|
||||
- run: |
|
||||
gem_name=(vagrant-*.gem)
|
||||
if [ "${CIRCLE_TAG}" == "" ]; then
|
||||
remote_gem_name="vagrant-master.gem"
|
||||
else
|
||||
remote_gem_name="vagrant.gem"
|
||||
fi
|
||||
if [[ "${CIRCLE_BRANCH}" = "build-"* ]]; then
|
||||
s3_dst="${ASSETS_PRIVATE_LONGTERM}/${CIRCLE_PROJECT_USERNAME}/${CIRCLE_PROJECT_REPONAME}/${CIRCLE_BRANCH##build-}/"
|
||||
else
|
||||
s3_dst="${ASSETS_PRIVATE_BUCKET}/${CIRCLE_PROJECT_USERNAME}/${CIRCLE_PROJECT_REPONAME}/"
|
||||
fi
|
||||
aws s3 cp "${gem_name}" "${s3_dst}${remote_gem_name}" > .output 2>&1
|
||||
- *PRIVATE_FAILURE
|
||||
release:
|
||||
<<: *builder
|
||||
steps:
|
||||
- checkout
|
||||
- attach_workspace:
|
||||
at: .
|
||||
- run: |
|
||||
set +e
|
||||
gem=(vagrant-*.gem)
|
||||
gem_version="${gem##vagrant-}"
|
||||
gem_version="${gem_version%%.gem}"
|
||||
export GITHUB_TOKEN="${HASHIBOT_TOKEN}"
|
||||
if [ "${CIRCLE_TAG}" = "" ]; then
|
||||
version="v${gem_version}+$(git rev-parse --short "${CIRCLE_SHA1}")"
|
||||
ghr -u ${CIRCLE_PROJECT_USERNAME} -r ${RELEASE_TARGET_REPONAME} -c master -prerelease -delete -replace ${version} ${gem} > .output 2>&1
|
||||
else
|
||||
version="${CIRCLE_TAG}"
|
||||
ghr -u ${CIRCLE_PROJECT_USERNAME} -r ${RELEASE_TARGET_REPONAME} -c master -delete -replace ${version} ${gem} > .output 2>&1
|
||||
fi
|
||||
slack -m "New Vagrant installers release triggered: *${version}*"
|
||||
- *PRIVATE_FAILURE
|
||||
test_ruby23:
|
||||
<<: *ruby23
|
||||
<<: *unit_tests
|
||||
test_ruby24:
|
||||
<<: *ruby24
|
||||
<<: *unit_tests
|
||||
test_ruby25:
|
||||
<<: *ruby25
|
||||
<<: *unit_tests
|
||||
test_ruby26:
|
||||
<<: *ruby26
|
||||
<<: *unit_tests
|
||||
build-website:
|
||||
# setting the working_directory along with the checkout path allows us to not have
|
||||
# to cd into the website/ directory for commands
|
||||
|
@ -151,51 +24,6 @@ jobs:
|
|||
command: ./scripts/deploy.sh
|
||||
workflows:
|
||||
version: 2
|
||||
builds:
|
||||
jobs:
|
||||
- build:
|
||||
<<: *PRIVATE_WORKFLOW_BUILD
|
||||
- store:
|
||||
<<: *PRIVATE_WORKFLOW_BUILD
|
||||
requires:
|
||||
- build
|
||||
- release:
|
||||
<<: *PRIVATE_WORKFLOW_BUILD
|
||||
requires:
|
||||
- build
|
||||
pull_requests:
|
||||
jobs:
|
||||
- test_ruby23:
|
||||
<<: *PUBLIC_WORKFLOW
|
||||
- test_ruby24:
|
||||
<<: *PUBLIC_WORKFLOW
|
||||
- test_ruby25:
|
||||
<<: *PUBLIC_WORKFLOW
|
||||
- test_ruby26:
|
||||
<<: *PUBLIC_WORKFLOW
|
||||
master:
|
||||
jobs:
|
||||
- test_ruby23:
|
||||
<<: *MASTER_WORKFLOW
|
||||
- test_ruby24:
|
||||
<<: *MASTER_WORKFLOW
|
||||
- test_ruby25:
|
||||
<<: *MASTER_WORKFLOW
|
||||
- test_ruby26:
|
||||
<<: *MASTER_WORKFLOW
|
||||
- build:
|
||||
<<: *MASTER_WORKFLOW
|
||||
context: vagrant
|
||||
requires:
|
||||
- test_ruby23
|
||||
- test_ruby24
|
||||
- test_ruby25
|
||||
- test_ruby26
|
||||
- store:
|
||||
<<: *MASTER_WORKFLOW
|
||||
context: vagrant
|
||||
requires:
|
||||
- build
|
||||
website:
|
||||
jobs:
|
||||
- build-website:
|
||||
|
|
|
@ -0,0 +1,37 @@
|
|||
on:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
paths-ignore:
|
||||
- 'CHANGELOG.md'
|
||||
- 'website/**'
|
||||
|
||||
jobs:
|
||||
build-gem:
|
||||
name: Build Vagrant RubyGem
|
||||
runs-on: ubuntu-18.04
|
||||
steps:
|
||||
- name: Code Checkout
|
||||
uses: actions/checkout@v1
|
||||
- name: Set Ruby
|
||||
uses: actions/setup-ruby@v1
|
||||
with:
|
||||
ruby-version: '2.6'
|
||||
- name: Build RubyGem
|
||||
run: ./.ci/build.sh
|
||||
working-directory: ${{github.workspace}}
|
||||
env:
|
||||
ASSETS_LONGTERM_PREFIX: elt
|
||||
ASSETS_PRIVATE_BUCKET: ${{ secrets.ASSETS_PRIVATE_BUCKET }}
|
||||
ASSETS_PRIVATE_LONGTERM: ${{ secrets.ASSETS_PRIVATE_LONGTERM }}
|
||||
ASSETS_PRIVATE_SHORTTERM: ${{ secrets.ASSETS_PRIVATE_SHORTTERM }}
|
||||
ASSETS_PUBLIC_BUCKET: ${{ secrets.ASSETS_PUBLIC_BUCKET }}
|
||||
ASSETS_PUBLIC_LONGTERM: ${{ secrets.ASSETS_PUBLIC_LONGTERM }}
|
||||
ASSETS_PUBLIC_SHORTTERM: ${{ secrets.ASSETS_PUBLIC_SHORTTERM }}
|
||||
ASSETS_SHORTTERM_PREFIX: est
|
||||
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
|
||||
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
|
||||
HASHIBOT_EMAIL: ${{ secrets.HASHIBOT_EMAIL }}
|
||||
HASHIBOT_TOKEN: ${{ secrets.HASHIBOT_TOKEN }}
|
||||
HASHIBOT_USERNAME: ${{ secrets.HASHIBOT_USERNAME }}
|
||||
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
|
|
@ -0,0 +1,38 @@
|
|||
on:
|
||||
push:
|
||||
branches:
|
||||
- 'build-*'
|
||||
tags: '*'
|
||||
paths-ignore:
|
||||
- 'CHANGELOG.md'
|
||||
- 'website/**'
|
||||
|
||||
jobs:
|
||||
trigger-release:
|
||||
name: Trigger Installers Build
|
||||
runs-on: ubuntu-18.04
|
||||
steps:
|
||||
- name: Code Checkout
|
||||
uses: actions/checkout@v1
|
||||
- name: Set Ruby
|
||||
uses: actions/setup-ruby@v1
|
||||
with:
|
||||
ruby-version: '2.6'
|
||||
- name: Create Builders Release
|
||||
run: ./.ci/release.sh
|
||||
working-directory: ${{github.workspace}}
|
||||
env:
|
||||
ASSETS_LONGTERM_PREFIX: elt
|
||||
ASSETS_PRIVATE_BUCKET: est
|
||||
ASSETS_PRIVATE_LONGTERM: ${{ secrets.ASSETS_PRIVATE_LONGTERM }}
|
||||
ASSETS_PRIVATE_SHORTTERM: ${{ secrets.ASSETS_PRIVATE_SHORTTERM }}
|
||||
ASSETS_PUBLIC_BUCKET: ${{ secrets.ASSETS_PUBLIC_BUCKET }}
|
||||
ASSETS_PUBLIC_LONGTERM: ${{ secrets.ASSETS_PUBLIC_LONGTERM }}
|
||||
ASSETS_PUBLIC_SHORTTERM: ${{ secrets.ASSETS_PUBLIC_SHORTTERM }}
|
||||
ASSETS_SHORTTERM_PREFIX: ${{ secrets.ASSETS_SHORTTERM_PREFIX }}
|
||||
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
|
||||
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
|
||||
HASHIBOT_EMAIL: ${{ secrets.HASHIBOT_EMAIL }}
|
||||
HASHIBOT_TOKEN: ${{ secrets.HASHIBOT_TOKEN }}
|
||||
HASHIBOT_USERNAME: ${{ secrets.HASHIBOT_USERNAME }}
|
||||
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
|
|
@ -0,0 +1,29 @@
|
|||
on:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
- 'test-*'
|
||||
paths-ignore:
|
||||
- 'CHANGELOG.md'
|
||||
- 'website/**'
|
||||
pull_request:
|
||||
branches:
|
||||
- master
|
||||
|
||||
jobs:
|
||||
unit-tests:
|
||||
runs-on: ubuntu-18.04
|
||||
strategy:
|
||||
matrix:
|
||||
ruby: [ '2.4.x', '2.5.x', '2.6.x' ]
|
||||
name: Vagrant unit tests on Ruby ${{ matrix.ruby }}
|
||||
steps:
|
||||
- name: Code Checkout
|
||||
uses: actions/checkout@v1
|
||||
- name: Setup Ruby
|
||||
uses: actions/setup-ruby@v1
|
||||
with:
|
||||
ruby-version: ${{matrix.ruby}}
|
||||
architecture: 'x64'
|
||||
- name: Run Tests
|
||||
run: .ci/test.sh
|
|
@ -0,0 +1,12 @@
|
|||
poll "closed_issue_locker" "locker" {
|
||||
schedule = "0 50 1 * * *"
|
||||
closed_for = "720h" # 30 days
|
||||
max_issues = 500
|
||||
sleep_between_issues = "5s"
|
||||
|
||||
message = <<-EOF
|
||||
I'm going to lock this issue because it has been closed for _30 days_ ⏳. This helps our maintainers find and focus on the active issues.
|
||||
|
||||
If you have found a problem that seems similar to this, please open a new issue and complete the issue template so we can capture all the details necessary to investigate further.
|
||||
EOF
|
||||
}
|
26
.travis.yml
26
.travis.yml
|
@ -1,26 +0,0 @@
|
|||
language: ruby
|
||||
|
||||
sudo: false
|
||||
|
||||
cache: bundler
|
||||
|
||||
addons:
|
||||
apt:
|
||||
packages:
|
||||
- bsdtar
|
||||
|
||||
rvm:
|
||||
- 2.3.8
|
||||
- 2.4.5
|
||||
- 2.5.3
|
||||
- 2.6.1
|
||||
|
||||
branches:
|
||||
only:
|
||||
- master
|
||||
|
||||
env:
|
||||
global:
|
||||
- NOKOGIRI_USE_SYSTEM_LIBRARIES=true
|
||||
|
||||
script: bundle exec rake test:unit
|
79
CHANGELOG.md
79
CHANGELOG.md
|
@ -2,18 +2,81 @@
|
|||
|
||||
FEATURES:
|
||||
|
||||
- guest/alpine: Integrate the vagrant-alpine plugin into Vagrant core [GH-10975]
|
||||
- core/provisioners: Introduce new Provisioner options: before and after [GH-11043]
|
||||
|
||||
IMPROVEMENTS:
|
||||
|
||||
- guest/alt: Improve handling for using network tools when setting hostname [GH-11000]
|
||||
- command/box/prune: Allow prompt skip while preserving actively in use boxes [GH-10908]
|
||||
- guest/suse: Add ipv6 network config templates for SUSE based distributions [GH-11013]
|
||||
|
||||
BUG FIXES:
|
||||
|
||||
## 2.2.7 (January 27, 2020)
|
||||
|
||||
IMPROVEMENTS:
|
||||
|
||||
- guest/opensuse: Check for basename hostname prior to setting hostname [GH-11170]
|
||||
- host/linux: Check for modinfo in /sbin if it's not on PATH [GH-11178]
|
||||
- core: Show guest name in hostname error message [GH-11175]
|
||||
- provisioners/shell: Linux guests now support `reboot` option [GH-11194]
|
||||
- darwin/nfs: Put each NFS export on its own line [GH-11216]
|
||||
- contrib/bash: Add more completion flags to up command [GH-11223]
|
||||
- provider/virtualbox: Add VirtualBox provider support for version 6.1.x [GH-11250]
|
||||
- box/outdated: Allow to force check for box updates and ignore cached check [GH-11231]
|
||||
- guest/alpine: Update apk cache when installing rsync [GH-11220]
|
||||
- provider/virtualbox: Improve error message when machine folder is inaccessible [GH-11239]
|
||||
- provisioner/ansible_local: Add pip install method for arch guests [GH-11265]
|
||||
- communicators/winssh: Use Windows shell for `vagrant ssh -c` [GH-11258]
|
||||
|
||||
BUG FIXES:
|
||||
|
||||
- command/snapshot/save: Fix regression that prevented snapshot of all guests in environment [GH-11152]
|
||||
- core: Update UI to properly retain newlines when adding prefix [GH-11126]
|
||||
- core: Check if box update is available locally [GH-11188]
|
||||
- core: Ensure Vagrant::Errors are loaded in file_checksum util [GH-11183]
|
||||
- cloud/publish: Improve argument handling for missing arguments to command [GH-11184]
|
||||
- core: Get latest version for current provider during outdated check [GH-11192]
|
||||
- linux/nfs: avoid adding extra newlines to /etc/exports [GH-11201]
|
||||
- guest/darwin: Fix VMware synced folders on APFS [GH-11267]
|
||||
- guest/redhat: Ensure `nfs-server` is restarted when installing nfs client [GH-11212]
|
||||
- core: Do not validate checksums if options are empty string [GH-11211]
|
||||
- provider/docker: Enhance docker build method to match against buildkit output [GH-11205]
|
||||
- provisioner/ansible_local: Don't prompt for input when installing Ansible on Ubuntu and Debian [GH-11191]
|
||||
- provisioner/ansible_local: Ensure all guest caps accept all passed in arguments [GH-11265]
|
||||
- host/windows: Fix regression that prevented port collisions from being detected [GH-11244]
|
||||
- core/provisioner: Set top level provisioner name if set in a provisioner config [GH-11295]
|
||||
|
||||
## 2.2.6 (October 14, 2019)
|
||||
|
||||
FEATURES:
|
||||
|
||||
- core/provisioners: Introduce new Provisioner options: before and after [GH-11043]
|
||||
- guest/alpine: Integrate the vagrant-alpine plugin into Vagrant core [GH-10975]
|
||||
|
||||
IMPROVEMENTS:
|
||||
|
||||
- command/box/prune: Allow prompt skip while preserving actively in use boxes [GH-10908]
|
||||
- command/cloud: Support providing checksum information with boxes [GH-11101]
|
||||
- dev: Fixed Vagrantfile for Vagrant development [GH-11012]
|
||||
- guest/alt: Improve handling for using network tools when setting hostname [GH-11000]
|
||||
- guest/suse: Add ipv6 network config templates for SUSE based distributions [GH-11013]
|
||||
- guest/windows: Retry on connection timeout errors for the reboot capability [GH-11093]
|
||||
- host/bsd: Use host resolve path capability to modify local paths if required [GH-11108]
|
||||
- host/darwin: Add host resolve path capability to provide real paths for firmlinks [GH-11108]
|
||||
- provisioners/chef: Update pkg install flags for chef on FreeBSD guests [GH-11075]
|
||||
- provider/hyperv: Improve error message when VMMS is not running [GH-10978]
|
||||
- provider/virtualbox: Raise additional errors for incomplete virtualbox installation on usable check [GH-10938]
|
||||
- util/filechecksum: Add support for more checksum types [GH-11101]
|
||||
|
||||
BUG FIXES:
|
||||
|
||||
- command/rsync-auto: Fix path watcher bug so that all subdirectories are synced when changed [GH-11089]
|
||||
- command/snapshot/save: Ensure VM id is passed to list snapshots for hyper-v provider [GH-11097]
|
||||
- core: Ensure proper paths are shown in config loading exceptions [GH-11056]
|
||||
- guest/suse: Use hostnamectl instead of hostname to set the hostname under SUSE [GH-11100]
|
||||
- provider/docker: Fix default provider validation if password is used [GH-11053]
|
||||
- provider/docker: Fix Docker providers usable? check [GH-11068]
|
||||
- provisioner/ansible_local: Ensure pip_install_cmd is finalized to emptry string [GH-11098]
|
||||
- provisioner/file: Ensure relative path for file provisioner source is relative to guest machines cwd [GH-11099]
|
||||
- provider/docker: Ensure docker build_args option is properly set in docker compose config yaml [GH-11106]
|
||||
- guest/suse: Update nfs & service daemon names for suse based hosts and guests [GH-11076]
|
||||
- provider/docker: Determine ip address prefix workaround for docker public networks [GH-11111]
|
||||
- provider/docker: Only return interfaces where addr is not nil for networks [GH-11116]
|
||||
|
||||
## 2.2.5 (June 19, 2019)
|
||||
|
||||
|
@ -489,7 +552,7 @@ BUG FIXES:
|
|||
- core: Rescue more exceptions when checking if port is open [GH-8517]
|
||||
- guests/solaris11: Inherit from Solaris guest and keep solaris11 specific methods [GH-9034]
|
||||
- guests/windows: Split out cygwin path helper for msys2/cygwin paths and ensure cygpath exists [GH-8972]
|
||||
- guests/windows: Specify expected shell when executing on guest (fixes winssh communicator usage) [GH-9012]
|
||||
- guests/windows: Specify expected shell when executing on guest (fixes einssh communicator usage) [GH-9012]
|
||||
- guests/windows: Include WinSSH Communicator when using insert_public_key [GH-9105]
|
||||
- hosts/windows: Check for vagrant.exe when validating versions within WSL [GH-9107, GH-8962]
|
||||
- providers/docker: Isolate windows check within executor to handle running through VM [GH-8921]
|
||||
|
|
|
@ -4,13 +4,13 @@
|
|||
# Ruby, run unit tests, etc.
|
||||
|
||||
Vagrant.configure("2") do |config|
|
||||
config.vm.box = "hashicorp/precise64"
|
||||
config.vm.box = "hashicorp/bionic64"
|
||||
config.vm.hostname = "vagrant"
|
||||
config.ssh.shell = "bash -c 'BASH_ENV=/etc/profile exec bash'"
|
||||
|
||||
["vmware_fusion", "vmware_workstation", "virtualbox"].each do |provider|
|
||||
["vmware_desktop", "virtualbox", "hyperv"].each do |provider|
|
||||
config.vm.provider provider do |v, override|
|
||||
v.memory = "1024"
|
||||
v.memory = "2048"
|
||||
end
|
||||
end
|
||||
|
||||
|
@ -29,32 +29,32 @@ $shell = <<-'CONTENTS'
|
|||
export DEBIAN_FRONTEND=noninteractive
|
||||
MARKER_FILE="/usr/local/etc/vagrant_provision_marker"
|
||||
RUBY_VER_REQ=$(awk '$1 == "s.required_ruby_version" { print $4 }' /vagrant/vagrant.gemspec | tr -d '"')
|
||||
BUNDLER_VER_REQ=$(awk '/s.add_dependency "bundler"/ { print $4 }' /vagrant/vagrant.gemspec | tr -d '"')
|
||||
|
||||
# Only provision once
|
||||
if [ -f "${MARKER_FILE}" ]; then
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Add ubuntu_rvm repo
|
||||
apt-add-repository -y ppa:rael-gc/rvm
|
||||
|
||||
# Update apt
|
||||
apt-get update --quiet
|
||||
|
||||
# Install basic dependencies
|
||||
apt-get install -qy build-essential bsdtar curl
|
||||
# Add vagrant user to sudo group:
|
||||
# ubuntu_rvm only adds users in group sudo to group rvm
|
||||
usermod -a -G sudo vagrant
|
||||
|
||||
# Install basic dependencies and RVM
|
||||
apt-get install -qy build-essential bsdtar rvm
|
||||
|
||||
# Import the mpapis public key to verify downloaded releases
|
||||
su -l -c 'gpg --keyserver hkp://keys.gnupg.net --recv-keys 409B6B1796C275462A1703113804BB82D39DC0E3' vagrant
|
||||
|
||||
# Install RVM
|
||||
su -l -c 'curl -sL https://get.rvm.io | bash -s stable' vagrant
|
||||
|
||||
# Add the vagrant user to the RVM group
|
||||
#usermod -a -G rvm vagrant
|
||||
|
||||
# Install latest Ruby that complies with Vagrant's version constraint
|
||||
RUBY_VER_LATEST=$(su -l -c 'rvm list known' vagrant | tr '[]-' ' ' | awk "/^ ruby ${RUBY_VER_REQ:0:1}\./ { print \$2 }" | sort | tail -n1)
|
||||
su -l -c "rvm install ${RUBY_VER_LATEST}" vagrant
|
||||
su -l -c "rvm --default use ${RUBY_VER_LATEST}" vagrant
|
||||
# Install next-to-last Ruby that complies with Vagrant's version constraint
|
||||
RUBY_VER=$(su -l -c 'rvm list known' vagrant | tr '[]-' ' ' | awk "/^ ruby ${RUBY_VER_REQ:0:1}\./ { print \$2 }" | sort -r | sed -n '2p')
|
||||
su -l -c "rvm install ${RUBY_VER}" vagrant
|
||||
su -l -c "rvm --default use ${RUBY_VER}" vagrant
|
||||
|
||||
# Output the Ruby version (for sanity)
|
||||
su -l -c 'ruby --version' vagrant
|
||||
|
@ -63,15 +63,14 @@ su -l -c 'ruby --version' vagrant
|
|||
apt-get install -qy git
|
||||
|
||||
# Upgrade Rubygems
|
||||
su -l -c "rvm ${RUBY_VER_LATEST} do gem update --system" vagrant
|
||||
su -l -c "rvm ${RUBY_VER} do gem update --system" vagrant
|
||||
|
||||
# Install bundler and prepare to run unit tests
|
||||
su -l -c "gem install bundler -v ${BUNDLER_VER_REQ}" vagrant
|
||||
# Prepare to run unit tests
|
||||
su -l -c 'cd /vagrant; bundle install' vagrant
|
||||
|
||||
# Automatically move into the shared folder, but only add the command
|
||||
# if it's not already there.
|
||||
grep -q 'cd /vagrant' /home/vagrant/.bash_profile || echo 'cd /vagrant' >> /home/vagrant/.bash_profile
|
||||
grep -q 'cd /vagrant' /home/vagrant/.bash_profile 2>/dev/null || echo 'cd /vagrant' >> /home/vagrant/.bash_profile
|
||||
|
||||
# Touch the marker file so we don't do this again
|
||||
touch ${MARKER_FILE}
|
||||
|
|
|
@ -75,7 +75,19 @@ _vagrant() {
|
|||
then
|
||||
local vm_list=$(find "${vagrant_state_file}/machines" -mindepth 1 -maxdepth 1 -type d -exec basename {} \;)
|
||||
fi
|
||||
local up_commands="--no-provision"
|
||||
local up_commands="\
|
||||
--provision \
|
||||
--no-provision \
|
||||
--provision-with \
|
||||
--destroy-on-error \
|
||||
--no-destroy-on-error \
|
||||
--parallel \
|
||||
--no-parallel
|
||||
--provider \
|
||||
--install-provider \
|
||||
--no-install-provider \
|
||||
-h \
|
||||
--help"
|
||||
COMPREPLY=($(compgen -W "${up_commands} ${vm_list}" -- ${cur}))
|
||||
return 0
|
||||
;;
|
||||
|
|
|
@ -15,6 +15,7 @@ module Vagrant
|
|||
autoload :Confirm, "vagrant/action/builtin/confirm"
|
||||
autoload :ConfigValidate, "vagrant/action/builtin/config_validate"
|
||||
autoload :DestroyConfirm, "vagrant/action/builtin/destroy_confirm"
|
||||
autoload :Disk, "vagrant/action/builtin/disk"
|
||||
autoload :EnvSet, "vagrant/action/builtin/env_set"
|
||||
autoload :GracefulHalt, "vagrant/action/builtin/graceful_halt"
|
||||
autoload :HandleBox, "vagrant/action/builtin/handle_box"
|
||||
|
|
|
@ -348,9 +348,15 @@ module Vagrant
|
|||
end
|
||||
|
||||
if opts[:checksum] && opts[:checksum_type]
|
||||
env[:ui].detail(I18n.t("vagrant.actions.box.add.checksumming"))
|
||||
validate_checksum(
|
||||
opts[:checksum_type], opts[:checksum], box_url)
|
||||
if opts[:checksum].to_s.strip.empty?
|
||||
@logger.warn("Given checksum is empty, cannot validate checksum for box")
|
||||
elsif opts[:checksum_type].to_s.strip.empty?
|
||||
@logger.warn("Given checksum type is empty, cannot validate checksum for box")
|
||||
else
|
||||
env[:ui].detail(I18n.t("vagrant.actions.box.add.checksumming"))
|
||||
validate_checksum(
|
||||
opts[:checksum_type], opts[:checksum], box_url)
|
||||
end
|
||||
end
|
||||
|
||||
# Add the box!
|
||||
|
@ -527,22 +533,11 @@ module Vagrant
|
|||
end
|
||||
|
||||
def validate_checksum(checksum_type, checksum, path)
|
||||
checksum_klass = case checksum_type.to_sym
|
||||
when :md5
|
||||
Digest::MD5
|
||||
when :sha1
|
||||
Digest::SHA1
|
||||
when :sha256
|
||||
Digest::SHA2
|
||||
else
|
||||
raise Errors::BoxChecksumInvalidType,
|
||||
type: checksum_type.to_s
|
||||
end
|
||||
|
||||
@logger.info("Validating checksum with #{checksum_klass}")
|
||||
@logger.info("Validating checksum with #{checksum_type}")
|
||||
@logger.info("Expected checksum: #{checksum}")
|
||||
|
||||
actual = FileChecksum.new(path, checksum_klass).checksum
|
||||
actual = FileChecksum.new(path, checksum_type).checksum
|
||||
@logger.info("Actual checksum: #{actual}")
|
||||
if actual.casecmp(checksum) != 0
|
||||
raise Errors::BoxChecksumMismatch,
|
||||
actual: actual,
|
||||
|
|
|
@ -40,7 +40,7 @@ module Vagrant
|
|||
# Have download options specified in the environment override
|
||||
# options specified for the machine.
|
||||
download_options = {
|
||||
automatic_check: true,
|
||||
automatic_check: !env[:box_outdated_force],
|
||||
ca_cert: env[:ca_cert] || machine.config.vm.box_download_ca_cert,
|
||||
ca_path: env[:ca_path] || machine.config.vm.box_download_ca_path,
|
||||
client_cert: env[:client_cert] ||
|
||||
|
@ -70,15 +70,23 @@ module Vagrant
|
|||
message: e.message))
|
||||
end
|
||||
env[:box_outdated] = update != nil
|
||||
if update
|
||||
local_update = check_outdated_local(env)
|
||||
if update && (local_update.nil? || (local_update.version < update[1].version))
|
||||
env[:ui].warn(I18n.t(
|
||||
"vagrant.box_outdated_single",
|
||||
name: update[0].name,
|
||||
provider: box.provider,
|
||||
current: box.version,
|
||||
latest: update[1].version))
|
||||
elsif local_update
|
||||
env[:ui].warn(I18n.t(
|
||||
"vagrant.box_outdated_local",
|
||||
name: local_update.name,
|
||||
old: box.version,
|
||||
new: local_update.version))
|
||||
env[:box_outdated] = true
|
||||
else
|
||||
check_outdated_local(env)
|
||||
env[:box_outdated] = false
|
||||
end
|
||||
|
||||
@app.call(env)
|
||||
|
@ -93,19 +101,8 @@ module Vagrant
|
|||
version ||= ""
|
||||
version += "> #{machine.box.version}"
|
||||
|
||||
box = env[:box_collection].find(
|
||||
env[:box_collection].find(
|
||||
machine.box.name, machine.box.provider, version)
|
||||
if box
|
||||
env[:ui].warn(I18n.t(
|
||||
"vagrant.box_outdated_local",
|
||||
name: box.name,
|
||||
old: machine.box.version,
|
||||
new: box.version))
|
||||
env[:box_outdated] = true
|
||||
return
|
||||
end
|
||||
|
||||
env[:box_outdated] = false
|
||||
end
|
||||
end
|
||||
end
|
||||
|
|
|
@ -0,0 +1,39 @@
|
|||
module Vagrant
|
||||
module Action
|
||||
module Builtin
|
||||
class Disk
|
||||
def initialize(app, env)
|
||||
@app = app
|
||||
@logger = Log4r::Logger.new("vagrant::action::builtin::disk")
|
||||
end
|
||||
|
||||
def call(env)
|
||||
machine = env[:machine]
|
||||
defined_disks = get_disks(machine, env)
|
||||
|
||||
# Call into providers machine implementation for disk management
|
||||
if !defined_disks.empty?
|
||||
if machine.provider.capability?(:configure_disks)
|
||||
machine.provider.capability(:configure_disks, defined_disks)
|
||||
else
|
||||
env[:ui].warn(I18n.t("vagrant.actions.disk.provider_unsupported",
|
||||
provider: machine.provider_name))
|
||||
end
|
||||
end
|
||||
|
||||
# Continue On
|
||||
@app.call(env)
|
||||
end
|
||||
|
||||
def get_disks(machine, env)
|
||||
return @_disks if @_disks
|
||||
|
||||
@_disks = []
|
||||
@_disks = machine.config.vm.disks
|
||||
|
||||
@_disks
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
|
@ -25,9 +25,27 @@ module Vagrant
|
|||
# Store in the type map so that --provision-with works properly
|
||||
@_provisioner_types[result] = provisioner.type
|
||||
|
||||
# Set top level provisioner name to provisioner configs name if top level name not set.
|
||||
# This is mostly for handling the shell provisioner, if a user has set its name like:
|
||||
#
|
||||
# config.vm.provision "shell", name: "my_provisioner"
|
||||
#
|
||||
# Where `name` is a shell config option, not a top level provisioner class option
|
||||
#
|
||||
# Note: `name` is set to a symbol, since it is converted to one via #Config::VM.provision
|
||||
provisioner_name = provisioner.name
|
||||
if !provisioner_name
|
||||
if provisioner.config.respond_to?(:name) &&
|
||||
provisioner.config.name
|
||||
provisioner_name = provisioner.config.name.to_sym
|
||||
end
|
||||
else
|
||||
provisioner_name = provisioner_name.to_sym
|
||||
end
|
||||
|
||||
# Build up the options
|
||||
options = {
|
||||
name: provisioner.name,
|
||||
name: provisioner_name,
|
||||
run: provisioner.run,
|
||||
before: provisioner.before,
|
||||
after: provisioner.after,
|
||||
|
|
|
@ -36,17 +36,35 @@ module Vagrant
|
|||
|
||||
# Get the command and wrap it in a login shell
|
||||
command = ShellQuote.escape(env[:ssh_run_command], "'")
|
||||
command = "#{env[:machine].config.ssh.shell} -c '#{command}'"
|
||||
|
||||
if env[:machine].config.vm.communicator == :winssh
|
||||
shell = env[:machine].config.winssh.shell
|
||||
else
|
||||
shell = env[:machine].config.ssh.shell
|
||||
end
|
||||
|
||||
if shell == "cmd"
|
||||
# Add an extra space to the command so cmd.exe quoting works
|
||||
# properly
|
||||
command = "#{shell} /C #{command} "
|
||||
elsif shell == "powershell"
|
||||
command = "$ProgressPreference = \"SilentlyContinue\"; #{command}"
|
||||
command = Base64.strict_encode64(command.encode("UTF-16LE", "UTF-8"))
|
||||
command = "#{shell} -encodedCommand #{command}"
|
||||
else
|
||||
command = "#{shell} -c '#{command}'"
|
||||
end
|
||||
|
||||
# Execute!
|
||||
opts = env[:ssh_opts] || {}
|
||||
opts[:extra_args] ||= []
|
||||
|
||||
# Allow the user to specify a tty or non-tty manually, but if they
|
||||
# don't then we default to a TTY
|
||||
# don't then we default to a TTY unless they are using WinSSH
|
||||
if !opts[:extra_args].include?("-t") &&
|
||||
!opts[:extra_args].include?("-T") &&
|
||||
env[:tty]
|
||||
env[:tty] &&
|
||||
env[:machine].config.vm.communicator != :winssh
|
||||
opts[:extra_args] << "-t"
|
||||
end
|
||||
|
||||
|
|
|
@ -47,7 +47,16 @@ module Vagrant
|
|||
raise Errors::VagrantInterrupt if env[:interrupted]
|
||||
action = @actions.shift
|
||||
@logger.info("Calling IN action: #{action}")
|
||||
|
||||
if !action.is_a?(Proc) && env[:hook]
|
||||
hook_name = action.class.name.split("::").last.
|
||||
gsub(/([a-z])([A-Z])/, '\1_\2').gsub('-', '_').downcase
|
||||
end
|
||||
|
||||
env[:hook].call("before_#{hook_name}".to_sym) if hook_name
|
||||
@stack.unshift(action).first.call(env)
|
||||
env[:hook].call("after_#{hook_name}".to_sym) if hook_name
|
||||
|
||||
raise Errors::VagrantInterrupt if env[:interrupted]
|
||||
@logger.info("Calling OUT action: #{action}")
|
||||
rescue SystemExit
|
||||
|
|
|
@ -68,11 +68,25 @@ module Vagrant
|
|||
|
||||
# Returns all the versions supported by this metadata. These
|
||||
# versions are sorted so the last element of the list is the
|
||||
# latest version.
|
||||
# latest version. Optionally filter versions by a matching
|
||||
# provider.
|
||||
#
|
||||
# @return[Array<String>]
|
||||
def versions
|
||||
@version_map.keys.sort.map(&:to_s)
|
||||
def versions(**opts)
|
||||
provider = nil
|
||||
provider = opts[:provider].to_sym if opts[:provider]
|
||||
|
||||
if provider
|
||||
@version_map.select do |version, raw|
|
||||
if raw["providers"]
|
||||
raw["providers"].detect do |p|
|
||||
p["name"].to_sym == provider
|
||||
end
|
||||
end
|
||||
end.keys.sort.map(&:to_s)
|
||||
else
|
||||
@version_map.keys.sort.map(&:to_s)
|
||||
end
|
||||
end
|
||||
|
||||
# Represents a single version within the metadata.
|
||||
|
|
|
@ -129,7 +129,7 @@ module Vagrant
|
|||
path = "(unknown)"
|
||||
if e.backtrace && e.backtrace[0]
|
||||
backtrace_tokens = e.backtrace[0].split(":")
|
||||
path = backtrace_tokens[0]
|
||||
path = e.backtrace.first.slice(0, e.backtrace.first.rindex(':')).rpartition(':').first
|
||||
backtrace_tokens.each do |part|
|
||||
if part =~ /\d+/
|
||||
line = part.to_i
|
||||
|
|
|
@ -436,6 +436,10 @@ module Vagrant
|
|||
error_key(:machine_action_locked)
|
||||
end
|
||||
|
||||
class MachineFolderNotAccessible < VagrantError
|
||||
error_key(:machine_folder_not_accessible)
|
||||
end
|
||||
|
||||
class MachineGuestNotReady < VagrantError
|
||||
error_key(:machine_guest_not_ready)
|
||||
end
|
||||
|
|
|
@ -329,10 +329,15 @@ module Vagrant
|
|||
target = opts[:target] if opts.key?(:target)
|
||||
target = "#{target}:" if target != ""
|
||||
|
||||
# Get the lines. The first default is because if the message
|
||||
# is an empty string, then we want to still use the empty string.
|
||||
lines = [message]
|
||||
lines = message.split("\n") if message != ""
|
||||
if message != ""
|
||||
lines = [].tap do |l|
|
||||
message.scan(/(.*?)(\n|$)/).each do |m|
|
||||
l << m.first if m.first != "" || (m.first == "" && m.last == "\n")
|
||||
end
|
||||
end
|
||||
lines << "" if message.end_with?("\n")
|
||||
end
|
||||
|
||||
# Otherwise, make sure to prefix every line properly
|
||||
lines.map do |line|
|
||||
|
|
|
@ -1,12 +1,14 @@
|
|||
require "uri"
|
||||
|
||||
require "log4r"
|
||||
require "digest"
|
||||
require "digest/md5"
|
||||
require "digest/sha1"
|
||||
require "vagrant/util/busy"
|
||||
require "vagrant/util/platform"
|
||||
require "vagrant/util/subprocess"
|
||||
require "vagrant/util/curl_helper"
|
||||
require "vagrant/util/file_checksum"
|
||||
|
||||
module Vagrant
|
||||
module Util
|
||||
|
@ -20,12 +22,6 @@ module Vagrant
|
|||
# Vagrant/1.7.4 (+https://www.vagrantup.com; ruby2.1.0)
|
||||
USER_AGENT = "Vagrant/#{VERSION} (+https://www.vagrantup.com; #{RUBY_ENGINE}#{RUBY_VERSION}) #{ENV['VAGRANT_USER_AGENT_PROVISIONAL_STRING']}".freeze
|
||||
|
||||
# Supported file checksum
|
||||
CHECKSUM_MAP = {
|
||||
:md5 => Digest::MD5,
|
||||
:sha1 => Digest::SHA1
|
||||
}.freeze
|
||||
|
||||
# Hosts that do not require notification on redirect
|
||||
SILENCED_HOSTS = [
|
||||
"vagrantcloud.com".freeze,
|
||||
|
@ -68,8 +64,11 @@ module Vagrant
|
|||
@location_trusted = options[:location_trusted]
|
||||
@checksums = {
|
||||
:md5 => options[:md5],
|
||||
:sha1 => options[:sha1]
|
||||
}
|
||||
:sha1 => options[:sha1],
|
||||
:sha256 => options[:sha256],
|
||||
:sha384 => options[:sha384],
|
||||
:sha512 => options[:sha512]
|
||||
}.compact
|
||||
end
|
||||
|
||||
# This executes the actual download, downloading the source file
|
||||
|
@ -165,36 +164,23 @@ module Vagrant
|
|||
# @option checksums [String] :sha1 Compare SHA1 checksum
|
||||
# @return [Boolean]
|
||||
def validate_download!(source, path, checksums)
|
||||
CHECKSUM_MAP.each do |type, klass|
|
||||
if checksums[type]
|
||||
result = checksum_file(klass, path)
|
||||
@logger.debug("Validating checksum (#{type}) for #{source}. " \
|
||||
"expected: #{checksums[type]} actual: #{result}")
|
||||
if checksums[type] != result
|
||||
raise Errors::DownloaderChecksumError.new(
|
||||
source: source,
|
||||
path: path,
|
||||
type: type,
|
||||
expected_checksum: checksums[type],
|
||||
actual_checksum: result
|
||||
)
|
||||
end
|
||||
checksums.each do |type, expected|
|
||||
actual = FileChecksum.new(path, type).checksum
|
||||
@logger.debug("Validating checksum (#{type}) for #{source}. " \
|
||||
"expected: #{expected} actual: #{actual}")
|
||||
if actual.casecmp(expected) != 0
|
||||
raise Errors::DownloaderChecksumError.new(
|
||||
source: source,
|
||||
path: path,
|
||||
type: type,
|
||||
expected_checksum: expected,
|
||||
actual_checksum: actual
|
||||
)
|
||||
end
|
||||
end
|
||||
true
|
||||
end
|
||||
|
||||
# Generate checksum on given file
|
||||
#
|
||||
# @param digest_class [Class] Digest class to use for generating checksum
|
||||
# @param path [String, Pathname] Path to file
|
||||
# @return [String] hexdigest result
|
||||
def checksum_file(digest_class, path)
|
||||
digester = digest_class.new
|
||||
digester.file(path)
|
||||
digester.hexdigest
|
||||
end
|
||||
|
||||
def execute_curl(options, subprocess_options, &data_proc)
|
||||
options = options.dup
|
||||
options << subprocess_options
|
||||
|
|
|
@ -2,6 +2,9 @@
|
|||
# passed into FileChecksum. Note that this isn't strictly enforced at
|
||||
# the moment, and this class isn't directly used. It is merely here for
|
||||
# documentation of structure of the class.
|
||||
|
||||
require "vagrant/errors"
|
||||
|
||||
class DigestClass
|
||||
def update(string); end
|
||||
def hexdigest; end
|
||||
|
@ -10,13 +13,27 @@ end
|
|||
class FileChecksum
|
||||
BUFFER_SIZE = 1024 * 8
|
||||
|
||||
# Supported file checksum
|
||||
CHECKSUM_MAP = {
|
||||
:md5 => Digest::MD5,
|
||||
:sha1 => Digest::SHA1,
|
||||
:sha256 => Digest::SHA256,
|
||||
:sha384 => Digest::SHA384,
|
||||
:sha512 => Digest::SHA512
|
||||
}.freeze
|
||||
|
||||
# Initializes an object to calculate the checksum of a file. The given
|
||||
# ``digest_klass`` should implement the ``DigestClass`` interface. Note
|
||||
# that the built-in Ruby digest classes duck type this properly:
|
||||
# Digest::MD5, Digest::SHA1, etc.
|
||||
def initialize(path, digest_klass)
|
||||
@digest_klass = digest_klass
|
||||
@path = path
|
||||
if digest_klass.is_a?(Class)
|
||||
@digest_klass = digest_klass
|
||||
else
|
||||
@digest_klass = load_digest(digest_klass)
|
||||
end
|
||||
|
||||
@path = path
|
||||
end
|
||||
|
||||
# This calculates the checksum of the file and returns it as a
|
||||
|
@ -40,6 +57,18 @@ class FileChecksum
|
|||
end
|
||||
end
|
||||
|
||||
return digest.hexdigest
|
||||
digest.hexdigest
|
||||
end
|
||||
|
||||
private
|
||||
|
||||
def load_digest(type)
|
||||
digest = CHECKSUM_MAP[type.to_s.to_sym]
|
||||
if digest.nil?
|
||||
raise Vagrant::Errors::BoxChecksumInvalidType,
|
||||
type: type.to_s,
|
||||
types: CHECKSUM_MAP.keys.join(', ')
|
||||
end
|
||||
digest
|
||||
end
|
||||
end
|
||||
|
|
|
@ -30,8 +30,7 @@ module Vagrant
|
|||
return true
|
||||
end
|
||||
rescue Timeout::Error, Errno::ECONNREFUSED, Errno::EHOSTUNREACH, \
|
||||
Errno::ENETUNREACH, Errno::EACCES, Errno::ENOTCONN, \
|
||||
Errno::EADDRNOTAVAIL
|
||||
Errno::ENETUNREACH, Errno::EACCES, Errno::ENOTCONN
|
||||
# Any of the above exceptions signal that the port is closed.
|
||||
return false
|
||||
end
|
||||
|
|
|
@ -0,0 +1,61 @@
|
|||
require "log4r"
|
||||
|
||||
module Vagrant
|
||||
module Util
|
||||
class Numeric
|
||||
|
||||
# Authors Note: This conversion has been borrowed from the ActiveSupport Numeric class
|
||||
# Conversion helper constants
|
||||
KILOBYTE = 1024
|
||||
MEGABYTE = KILOBYTE * 1024
|
||||
GIGABYTE = MEGABYTE * 1024
|
||||
TERABYTE = GIGABYTE * 1024
|
||||
PETABYTE = TERABYTE * 1024
|
||||
EXABYTE = PETABYTE * 1024
|
||||
|
||||
BYTES_CONVERSION_MAP = {KB: KILOBYTE, MB: MEGABYTE, GB: GIGABYTE, TB: TERABYTE,
|
||||
PB: PETABYTE, EB: EXABYTE}
|
||||
|
||||
# Regex borrowed from the vagrant-disksize config class
|
||||
SHORTHAND_MATCH_REGEX = /^(?<number>[0-9]+)\s?(?<unit>KB|MB|GB|TB)?$/
|
||||
|
||||
class << self
|
||||
LOGGER = Log4r::Logger.new("vagrant::util::numeric")
|
||||
|
||||
# A helper that converts a shortcut string to its bytes representation.
|
||||
# The expected format of `str` is essentially: "<Number>XX"
|
||||
# Where `XX` is shorthand for KB, MB, GB, TB, PB, or EB. For example, 50 megabytes:
|
||||
#
|
||||
# str = "50MB"
|
||||
#
|
||||
# @param [String] - str
|
||||
# @return [Integer,nil] - bytes - returns nil if method fails to convert to bytes
|
||||
def string_to_bytes(str)
|
||||
bytes = nil
|
||||
|
||||
str = str.to_s.strip
|
||||
matches = SHORTHAND_MATCH_REGEX.match(str)
|
||||
if matches
|
||||
number = matches[:number].to_i
|
||||
unit = matches[:unit].to_sym
|
||||
|
||||
if BYTES_CONVERSION_MAP.key?(unit)
|
||||
bytes = number * BYTES_CONVERSION_MAP[unit]
|
||||
else
|
||||
LOGGER.error("An invalid unit or format was given, string_to_bytes cannot convert #{str}")
|
||||
end
|
||||
end
|
||||
|
||||
bytes
|
||||
end
|
||||
|
||||
# @private
|
||||
# Reset the cached values for platform. This is not considered a public
|
||||
# API and should only be used for testing.
|
||||
def reset!
|
||||
instance_variables.each(&method(:remove_instance_variable))
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
|
@ -26,6 +26,10 @@ module VagrantPlugins
|
|||
options[:global] = g
|
||||
end
|
||||
|
||||
o.on("-f", "--force", "Force checks for latest box updates") do |f|
|
||||
options[:force] = f
|
||||
end
|
||||
|
||||
build_download_options(o, download_options)
|
||||
end
|
||||
|
||||
|
@ -40,7 +44,7 @@ module VagrantPlugins
|
|||
|
||||
with_target_vms(argv) do |machine|
|
||||
@env.action_runner.run(Vagrant::Action.action_box_outdated, {
|
||||
box_outdated_force: true,
|
||||
box_outdated_force: options[:force],
|
||||
box_outdated_refresh: true,
|
||||
box_outdated_success_ui: true,
|
||||
machine: machine,
|
||||
|
@ -73,7 +77,15 @@ module VagrantPlugins
|
|||
end
|
||||
|
||||
current = Gem::Version.new(box.version)
|
||||
latest = Gem::Version.new(md.versions.last)
|
||||
box_versions = md.versions(provider: box.provider)
|
||||
|
||||
if box_versions.empty?
|
||||
latest_box_version = box_versions.last.to_i
|
||||
else
|
||||
latest_box_version = box_versions.last
|
||||
end
|
||||
|
||||
latest = Gem::Version.new(latest_box_version)
|
||||
if latest <= current
|
||||
@env.ui.success(I18n.t(
|
||||
"vagrant.box_up_to_date",
|
||||
|
|
|
@ -144,7 +144,7 @@ en:
|
|||
unauthorized: |-
|
||||
Invalid username or password. Please try again.
|
||||
unexpected_error: |-
|
||||
An unexpected error occured: %{error}
|
||||
An unexpected error occurred: %{error}
|
||||
|
||||
check_logged_in: |-
|
||||
You are already logged in.
|
||||
|
|
|
@ -19,6 +19,12 @@ module VagrantPlugins
|
|||
o.on("-u", "--username USERNAME_OR_EMAIL", String, "Vagrant Cloud username or email address") do |u|
|
||||
options[:username] = u
|
||||
end
|
||||
o.on("-c", "--checksum CHECKSUM_VALUE", String, "Checksum of the box for this provider. --checksum-type option is required.") do |c|
|
||||
options[:checksum] = c
|
||||
end
|
||||
o.on("-C", "--checksum-type TYPE", String, "Type of checksum used (md5, sha1, sha256, sha384, sha512). --checksum option is required.") do |c|
|
||||
options[:checksum_type] = c
|
||||
end
|
||||
end
|
||||
|
||||
# Parse the options
|
||||
|
@ -52,16 +58,17 @@ module VagrantPlugins
|
|||
account = VagrantPlugins::CloudCommand::Util.account(org, access_token, server_url)
|
||||
box = VagrantCloud::Box.new(account, box_name, nil, nil, nil, access_token)
|
||||
cloud_version = VagrantCloud::Version.new(box, version, nil, nil, access_token)
|
||||
provider = VagrantCloud::Provider.new(cloud_version, provider_name, nil, url, org, box_name, access_token)
|
||||
provider = VagrantCloud::Provider.new(cloud_version, provider_name, nil, url, org, box_name,
|
||||
access_token, nil, options[:checksum], options[:checksum_type])
|
||||
|
||||
begin
|
||||
success = provider.create_provider
|
||||
@env.ui.success(I18n.t("cloud_command.provider.create_success", provider:provider_name, org: org, box_name: box_name, version: version))
|
||||
success = success.delete_if{|_, v|v.nil?}
|
||||
@env.ui.success(I18n.t("cloud_command.provider.create_success", provider: provider_name, org: org, box_name: box_name, version: version))
|
||||
success = success.compact
|
||||
VagrantPlugins::CloudCommand::Util.format_box_results(success, @env)
|
||||
return 0
|
||||
rescue VagrantCloud::ClientError => e
|
||||
@env.ui.error(I18n.t("cloud_command.errors.provider.create_fail", provider:provider_name, org: org, box_name: box_name, version: version))
|
||||
@env.ui.error(I18n.t("cloud_command.errors.provider.create_fail", provider: provider_name, org: org, box_name: box_name, version: version))
|
||||
@env.ui.error(e)
|
||||
return 1
|
||||
end
|
||||
|
|
|
@ -19,6 +19,12 @@ module VagrantPlugins
|
|||
o.on("-u", "--username USERNAME_OR_EMAIL", String, "Vagrant Cloud username or email address") do |u|
|
||||
options[:username] = u
|
||||
end
|
||||
o.on("-c", "--checksum CHECKSUM_VALUE", String, "Checksum of the box for this provider. --checksum-type option is required.") do |c|
|
||||
options[:checksum] = c
|
||||
end
|
||||
o.on("-C", "--checksum-type TYPE", String, "Type of checksum used (md5, sha1, sha256, sha384, sha512). --checksum option is required.") do |c|
|
||||
options[:checksum_type] = c
|
||||
end
|
||||
end
|
||||
|
||||
# Parse the options
|
||||
|
@ -52,7 +58,8 @@ module VagrantPlugins
|
|||
account = VagrantPlugins::CloudCommand::Util.account(org, access_token, server_url)
|
||||
box = VagrantCloud::Box.new(account, box_name, nil, nil, nil, access_token)
|
||||
cloud_version = VagrantCloud::Version.new(box, version, nil, nil, access_token)
|
||||
provider = VagrantCloud::Provider.new(cloud_version, provider_name, nil, url, org, box_name, access_token)
|
||||
provider = VagrantCloud::Provider.new(cloud_version, provider_name, nil, url, org, box_name,
|
||||
access_token, nil, options[:checksum], options[:checksum_type])
|
||||
|
||||
begin
|
||||
success = provider.update
|
||||
|
|
|
@ -43,13 +43,19 @@ module VagrantPlugins
|
|||
o.on("-u", "--username USERNAME_OR_EMAIL", String, "Vagrant Cloud username or email address") do |u|
|
||||
options[:username] = u
|
||||
end
|
||||
o.on("-c", "--checksum CHECKSUM_VALUE", String, "Checksum of the box for this provider. --checksum-type option is required.") do |c|
|
||||
options[:checksum] = c
|
||||
end
|
||||
o.on("-C", "--checksum-type TYPE", String, "Type of checksum used (md5, sha1, sha256, sha384, sha512). --checksum option is required.") do |c|
|
||||
options[:checksum_type] = c
|
||||
end
|
||||
end
|
||||
|
||||
# Parse the options
|
||||
argv = parse_options(opts)
|
||||
return if !argv
|
||||
|
||||
if argv.empty? || argv.length > 4 || argv.length < 3
|
||||
if argv.empty? || argv.length > 4 || argv.length < 3 || (argv.length == 3 && !options[:url])
|
||||
raise Vagrant::Errors::CLIInvalidUsage,
|
||||
help: opts.help.chomp
|
||||
end
|
||||
|
@ -97,7 +103,8 @@ module VagrantPlugins
|
|||
account = VagrantPlugins::CloudCommand::Util.account(org, access_token, server_url)
|
||||
box = VagrantCloud::Box.new(account, box_name, nil, options[:short_description], options[:description], access_token)
|
||||
cloud_version = VagrantCloud::Version.new(box, version, nil, options[:version_description], access_token)
|
||||
provider = VagrantCloud::Provider.new(cloud_version, provider_name, nil, options[:url], org, box_name, access_token)
|
||||
provider = VagrantCloud::Provider.new(cloud_version, provider_name, nil, options[:url], org, box_name,
|
||||
access_token, nil, options[:checksum], options[:checksum_type])
|
||||
|
||||
ui = Vagrant::UI::Prefixed.new(@env.ui, "cloud")
|
||||
|
||||
|
|
|
@ -15,6 +15,9 @@ module VagrantPlugins
|
|||
o.separator "can be restored via `vagrant snapshot restore` at any point in the"
|
||||
o.separator "future to get back to this exact machine state."
|
||||
o.separator ""
|
||||
o.separator "If no vm-name is given, Vagrant will take a snapshot of"
|
||||
o.separator "the entire environment with the same snapshot name."
|
||||
o.separator ""
|
||||
o.separator "Snapshots are useful for experimenting in a machine and being able"
|
||||
o.separator "to rollback quickly."
|
||||
|
||||
|
@ -32,11 +35,21 @@ module VagrantPlugins
|
|||
end
|
||||
|
||||
name = argv.pop
|
||||
|
||||
with_target_vms(argv) do |vm|
|
||||
if !vm.provider.capability?(:snapshot_list)
|
||||
raise Vagrant::Errors::SnapshotNotSupported
|
||||
end
|
||||
|
||||
# In this case, no vm name was given, and we are iterating over the
|
||||
# entire environment. If a vm hasn't been created yet, we can't list
|
||||
# its snapshots
|
||||
if vm.id.nil?
|
||||
@env.ui.warn(I18n.t("vagrant.commands.snapshot.save.vm_not_created",
|
||||
name: vm.name))
|
||||
next
|
||||
end
|
||||
|
||||
snapshot_list = vm.provider.capability(:snapshot_list)
|
||||
|
||||
if !snapshot_list.include? name
|
||||
|
|
|
@ -8,7 +8,7 @@ module VagrantPlugins
|
|||
|
||||
def self.rsync_install(machine)
|
||||
machine.communicate.tap do |comm|
|
||||
comm.sudo('apk add rsync')
|
||||
comm.sudo('apk add --update-cache rsync')
|
||||
end
|
||||
end
|
||||
end
|
||||
|
|
|
@ -45,6 +45,22 @@ module VagrantPlugins
|
|||
require_relative 'cap/smb'
|
||||
Cap::SMB
|
||||
end
|
||||
|
||||
def self.check_community_plugin
|
||||
plugins = Vagrant::Plugin::Manager.instance.installed_plugins
|
||||
if plugins.keys.include?("vagrant-alpine")
|
||||
$stderr.puts <<-EOF
|
||||
WARNING: Vagrant has detected the `vagrant-alpine` plugin. This plugin's
|
||||
functionality has been merged into the main Vagrant project and should be
|
||||
considered deprecated. To uninstall the plugin, run the command shown below:
|
||||
|
||||
vagrant plugin uninstall vagrant-alpine
|
||||
|
||||
EOF
|
||||
end
|
||||
end
|
||||
|
||||
self.check_community_plugin
|
||||
end
|
||||
end
|
||||
end
|
||||
|
|
|
@ -1,3 +1,5 @@
|
|||
require "securerandom"
|
||||
|
||||
module VagrantPlugins
|
||||
module GuestDarwin
|
||||
module Cap
|
||||
|
@ -5,31 +7,102 @@ module VagrantPlugins
|
|||
|
||||
# we seem to be unable to ask 'mount -t vmhgfs' to mount the roots
|
||||
# of specific shares, so instead we symlink from what is already
|
||||
# mounted by the guest tools
|
||||
# mounted by the guest tools
|
||||
# (ie. the behaviour of the VMware_fusion provider prior to 0.8.x)
|
||||
|
||||
def self.mount_vmware_shared_folder(machine, name, guestpath, options)
|
||||
# Use this variable to determine which machines
|
||||
# have been registered with after hook
|
||||
@apply_firmlinks ||= Hash.new{ |h, k| h[k] = {bootstrap: false, content: []} }
|
||||
|
||||
machine.communicate.tap do |comm|
|
||||
# clear prior symlink
|
||||
if comm.test("test -L \"#{guestpath}\"", sudo: true)
|
||||
comm.sudo("rm -f \"#{guestpath}\"")
|
||||
# check if we are dealing with an APFS root container
|
||||
if comm.test("test -d /System/Volumes/Data")
|
||||
parts = Pathname.new(guestpath).descend.to_a
|
||||
firmlink = parts[1].to_s
|
||||
firmlink.slice!(0, 1) if firmlink.start_with?("/")
|
||||
if parts.size > 2
|
||||
guestpath = File.join("/System/Volumes/Data", guestpath)
|
||||
else
|
||||
guestpath = nil
|
||||
end
|
||||
end
|
||||
|
||||
# clear prior directory if exists
|
||||
if comm.test("test -d \"#{guestpath}\"", sudo: true)
|
||||
comm.sudo("rm -Rf \"#{guestpath}\"")
|
||||
# Remove existing symlink or directory if defined
|
||||
if guestpath
|
||||
if comm.test("test -L \"#{guestpath}\"")
|
||||
comm.sudo("rm -f \"#{guestpath}\"")
|
||||
elsif comm.test("test -d \"#{guestpath}\"")
|
||||
comm.sudo("rm -Rf \"#{guestpath}\"")
|
||||
end
|
||||
|
||||
# create intermediate directories if needed
|
||||
intermediate_dir = File.dirname(guestpath)
|
||||
if intermediate_dir != "/"
|
||||
comm.sudo("mkdir -p \"#{intermediate_dir}\"")
|
||||
end
|
||||
|
||||
comm.sudo("ln -s \"/Volumes/VMware Shared Folders/#{name}\" \"#{guestpath}\"")
|
||||
end
|
||||
|
||||
# create intermediate directories if needed
|
||||
intermediate_dir = File.dirname(guestpath)
|
||||
if !comm.test("test -d \"#{intermediate_dir}\"", sudo: true)
|
||||
comm.sudo("mkdir -p \"#{intermediate_dir}\"")
|
||||
end
|
||||
if firmlink && !system_firmlink?(firmlink)
|
||||
if guestpath.nil?
|
||||
guestpath = "/Volumes/VMware Shared Folders/#{name}"
|
||||
else
|
||||
guestpath = File.join("/System/Volumes/Data", firmlink)
|
||||
end
|
||||
|
||||
# finally make the symlink
|
||||
comm.sudo("ln -s \"/Volumes/VMware Shared Folders/#{name}\" \"#{guestpath}\"")
|
||||
share_line = "#{firmlink}\t#{guestpath}"
|
||||
|
||||
# Check if the line is already defined. If so, bail since we are done
|
||||
if !comm.test("[[ \"$(</etc/synthetic.conf)\" = *\"#{share_line}\"* ]]")
|
||||
@apply_firmlinks[machine.id][:bootstrap] = true
|
||||
end
|
||||
|
||||
# If we haven't already added our hook to apply firmlinks, do it now
|
||||
if @apply_firmlinks[machine.id][:content].empty?
|
||||
Plugin.action_hook(:apfs_firmlinks, :after_synced_folders) do |hook|
|
||||
action = proc { |*_|
|
||||
content = @apply_firmlinks[machine.id][:content].join("\n")
|
||||
# Write out the synthetic file
|
||||
comm.sudo("echo -e #{content.inspect} > /etc/synthetic.conf")
|
||||
if @apply_firmlinks[:bootstrap]
|
||||
# Re-bootstrap the root container to pick up firmlink updates
|
||||
comm.sudo("/System/Library/Filesystems/apfs.fs/Contents/Resources/apfs.util -B")
|
||||
end
|
||||
}
|
||||
hook.prepend(action)
|
||||
end
|
||||
end
|
||||
@apply_firmlinks[machine.id][:content] << share_line
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
# Check if firmlink is provided by the system
|
||||
#
|
||||
# @param [String] firmlink Firmlink path
|
||||
# @return [Boolean]
|
||||
def self.system_firmlink?(firmlink)
|
||||
if !@_firmlinks
|
||||
if File.exist?("/usr/share/firmlinks")
|
||||
@_firmlinks = File.readlines("/usr/share/firmlinks").map do |line|
|
||||
line.split.first
|
||||
end
|
||||
else
|
||||
@_firmlinks = []
|
||||
end
|
||||
end
|
||||
firmlink = "/#{firmlink}" if !firmlink.start_with?("/")
|
||||
@_firmlinks.include?(firmlink)
|
||||
end
|
||||
|
||||
# @private
|
||||
# Reset the cached values for capability. This is not considered a public
|
||||
# API and should only be used for testing.
|
||||
def self.reset!
|
||||
instance_variables.each(&method(:remove_instance_variable))
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
|
|
|
@ -0,0 +1,42 @@
|
|||
require "log4r"
|
||||
|
||||
module VagrantPlugins
|
||||
module GuestLinux
|
||||
module Cap
|
||||
class Reboot
|
||||
MAX_REBOOT_RETRY_DURATION = 120
|
||||
|
||||
def self.reboot(machine)
|
||||
@logger = Log4r::Logger.new("vagrant::linux::reboot")
|
||||
reboot_script = "reboot"
|
||||
|
||||
comm = machine.communicate
|
||||
|
||||
@logger.debug("Issuing reboot command for guest")
|
||||
comm.sudo(reboot_script)
|
||||
|
||||
machine.ui.info(I18n.t("vagrant.guests.capabilities.rebooting"))
|
||||
|
||||
@logger.debug("Waiting for machine to finish rebooting")
|
||||
|
||||
wait_remaining = MAX_REBOOT_RETRY_DURATION
|
||||
begin
|
||||
wait_for_reboot(machine)
|
||||
rescue Vagrant::Errors::MachineGuestNotReady => e
|
||||
raise if wait_remaining < 0
|
||||
@logger.warn("Machine not ready, cannot start reboot yet. Trying again")
|
||||
sleep(5)
|
||||
wait_remaining -= 5
|
||||
retry
|
||||
end
|
||||
end
|
||||
|
||||
def self.wait_for_reboot(machine)
|
||||
while !machine.guest.ready?
|
||||
sleep 10
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
|
@ -82,6 +82,16 @@ module VagrantPlugins
|
|||
Cap::ReadIPAddress
|
||||
end
|
||||
|
||||
guest_capability(:linux, :wait_for_reboot) do
|
||||
require_relative "cap/reboot"
|
||||
Cap::Reboot
|
||||
end
|
||||
|
||||
guest_capability(:linux, :reboot) do
|
||||
require_relative "cap/reboot"
|
||||
Cap::Reboot
|
||||
end
|
||||
|
||||
guest_capability(:linux, :remove_public_key) do
|
||||
require_relative "cap/public_key"
|
||||
Cap::PublicKey
|
||||
|
|
|
@ -5,7 +5,7 @@ module VagrantPlugins
|
|||
def self.nfs_client_install(machine)
|
||||
machine.communicate.sudo <<-EOH.gsub(/^ {12}/, '')
|
||||
if command -v dnf; then
|
||||
if `dnf info -q libnfs-utils > /dev/null 2>&1` ; then
|
||||
if `dnf info -q libnfs-utils > /dev/null 2>&1` ; then
|
||||
dnf -y install nfs-utils libnfs-utils portmap
|
||||
else
|
||||
dnf -y install nfs-utils nfs-utils-lib portmap
|
||||
|
@ -15,7 +15,7 @@ module VagrantPlugins
|
|||
fi
|
||||
|
||||
if test $(ps -o comm= 1) == 'systemd'; then
|
||||
/bin/systemctl restart rpcbind nfs
|
||||
/bin/systemctl restart rpcbind nfs-server
|
||||
else
|
||||
/etc/init.d/rpcbind restart
|
||||
/etc/init.d/nfs restart
|
||||
|
|
|
@ -5,11 +5,10 @@ module VagrantPlugins
|
|||
def self.change_host_name(machine, name)
|
||||
comm = machine.communicate
|
||||
|
||||
if !comm.test("hostname -f | grep '^#{name}$'", sudo: false)
|
||||
basename = name.split(".", 2)[0]
|
||||
basename = name.split(".", 2)[0]
|
||||
if !comm.test('test "$(hostnamectl --static status)" = "#{basename}"', sudo: false)
|
||||
comm.sudo <<-EOH.gsub(/^ {14}/, '')
|
||||
echo '#{basename}' > /etc/HOSTNAME
|
||||
hostname '#{basename}'
|
||||
hostnamectl set-hostname '#{basename}'
|
||||
|
||||
# Prepend ourselves to /etc/hosts
|
||||
grep -w '#{name}' /etc/hosts || {
|
||||
|
|
|
@ -5,8 +5,8 @@ module VagrantPlugins
|
|||
def self.nfs_client_install(machine)
|
||||
machine.communicate.sudo <<-EOH.gsub(/^ {12}/, '')
|
||||
zypper -n install nfs-client
|
||||
/sbin/service rpcbind restart
|
||||
/sbin/service nfs restart
|
||||
/usr/bin/systemctl restart rpcbind
|
||||
/usr/bin/systemctl restart nfs-client.target
|
||||
EOH
|
||||
end
|
||||
end
|
||||
|
|
|
@ -28,7 +28,7 @@ module VagrantPlugins
|
|||
wait_remaining = MAX_REBOOT_RETRY_DURATION
|
||||
begin
|
||||
wait_for_reboot(machine)
|
||||
rescue Vagrant::Errors::MachineGuestNotReady, WinRM::WinRMHTTPTransportError => e
|
||||
rescue HTTPClient::ConnectTimeoutError, Vagrant::Errors::MachineGuestNotReady, WinRM::WinRMHTTPTransportError => e
|
||||
raise if wait_remaining < 0
|
||||
@logger.warn("Machine not ready, cannot start reboot yet. Trying again")
|
||||
sleep(5)
|
||||
|
|
|
@ -26,8 +26,8 @@ module VagrantPlugins
|
|||
logger.debug("Compiling map of sub-directories for NFS exports...")
|
||||
dirmap = {}
|
||||
folders.sort_by { |_, opts| opts[:hostpath] }.each do |_, opts|
|
||||
opts[:hostpath] = environment.host.capability(:resolve_host_path, opts[:hostpath].gsub('"', '\"'))
|
||||
hostpath = opts[:hostpath].dup
|
||||
hostpath.gsub!('"', '\"')
|
||||
|
||||
found = false
|
||||
dirmap.each do |dirs, diropts|
|
||||
|
|
|
@ -0,0 +1,11 @@
|
|||
module VagrantPlugins
|
||||
module HostBSD
|
||||
module Cap
|
||||
class Path
|
||||
def self.resolve_host_path(env, path)
|
||||
path
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
|
@ -36,6 +36,11 @@ module VagrantPlugins
|
|||
Cap::NFS
|
||||
end
|
||||
|
||||
host_capability("bsd", "resolve_host_path") do
|
||||
require_relative "cap/path"
|
||||
Cap::Path
|
||||
end
|
||||
|
||||
host_capability("bsd", "set_ssh_key_permissions") do
|
||||
require_relative "cap/ssh"
|
||||
Cap::SSH
|
||||
|
|
|
@ -0,0 +1,11 @@
|
|||
module VagrantPlugins
|
||||
module HostDarwin
|
||||
module Cap
|
||||
class NFS
|
||||
def self.nfs_exports_template(environment)
|
||||
"nfs/exports_darwin"
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
|
@ -0,0 +1,58 @@
|
|||
module VagrantPlugins
|
||||
module HostDarwin
|
||||
module Cap
|
||||
class Path
|
||||
@@logger = Log4r::Logger.new("vagrant::host::darwin::path")
|
||||
|
||||
FIRMLINK_DEFS = "/usr/share/firmlinks".freeze
|
||||
FIRMLINK_DATA_PATH = "/System/Volumes/Data".freeze
|
||||
|
||||
# Resolve the given host path to the actual
|
||||
# usable system path by detecting firmlinks
|
||||
# if available on the current system
|
||||
#
|
||||
# @param [String] path Host system path
|
||||
# @return [String] resolved path
|
||||
def self.resolve_host_path(env, path)
|
||||
path = File.expand_path(path)
|
||||
firmlink = firmlink_map.detect do |mount_path, data_path|
|
||||
path.start_with?(mount_path)
|
||||
end
|
||||
return path if firmlink.nil?
|
||||
current_prefix, new_suffix = firmlink
|
||||
new_prefix = File.join(FIRMLINK_DATA_PATH, new_suffix)
|
||||
new_path = path.sub(current_prefix, new_prefix)
|
||||
@@logger.debug("Resolved given path `#{path}` to `#{new_path}`")
|
||||
new_path
|
||||
end
|
||||
|
||||
# Generate mapping of firmlinks if available on the host
|
||||
#
|
||||
# @return [Hash<String,String>]
|
||||
def self.firmlink_map
|
||||
if !@firmlink_map
|
||||
return @firmlink_map = {} if !File.exist?(FIRMLINK_DEFS)
|
||||
begin
|
||||
@firmlink_map = Hash[
|
||||
File.readlines(FIRMLINK_DEFS).map { |d|
|
||||
d.strip.split(/\s+/, 2)
|
||||
}
|
||||
]
|
||||
rescue => err
|
||||
@@logger.warn("Failed to parse firmlink definitions: #{err}")
|
||||
@firmlink_map = {}
|
||||
end
|
||||
end
|
||||
@firmlink_map
|
||||
end
|
||||
|
||||
# @private
|
||||
# Reset the cached values for capability. This is not considered a public
|
||||
# API and should only be used for testing.
|
||||
def self.reset!
|
||||
instance_variables.each(&method(:remove_instance_variable))
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
|
@ -16,6 +16,11 @@ module VagrantPlugins
|
|||
Cap::ProviderInstallVirtualBox
|
||||
end
|
||||
|
||||
host_capability("darwin", "resolve_host_path") do
|
||||
require_relative "cap/path"
|
||||
Cap::Path
|
||||
end
|
||||
|
||||
host_capability("darwin", "rdp_client") do
|
||||
require_relative "cap/rdp"
|
||||
Cap::RDP
|
||||
|
@ -50,6 +55,11 @@ module VagrantPlugins
|
|||
require_relative "cap/configured_ip_addresses"
|
||||
Cap::ConfiguredIPAddresses
|
||||
end
|
||||
|
||||
host_capability("darwin", "nfs_exports_template") do
|
||||
require_relative "cap/nfs"
|
||||
Cap::NFS
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
|
|
|
@ -77,7 +77,7 @@ module VagrantPlugins
|
|||
sleep 0.5
|
||||
|
||||
nfs_cleanup("#{Process.uid} #{id}")
|
||||
output = "#{nfs_exports_content}\n#{output}"
|
||||
output = nfs_exports_content + output
|
||||
nfs_write_exports(output)
|
||||
|
||||
if nfs_running?(nfs_check_command)
|
||||
|
@ -93,7 +93,7 @@ module VagrantPlugins
|
|||
"systemctl --no-pager --no-legend --plain list-unit-files --all --type=service " \
|
||||
"| grep #{nfs_service_name_systemd}").exit_code == 0
|
||||
else
|
||||
Vagrant::Util::Subprocess.execute("modinfo", "nfsd").exit_code == 0 ||
|
||||
Vagrant::Util::Subprocess.execute(modinfo_path, "nfsd").exit_code == 0 ||
|
||||
Vagrant::Util::Subprocess.execute("grep", "nfsd", "/proc/filesystems").exit_code == 0
|
||||
end
|
||||
end
|
||||
|
@ -261,6 +261,24 @@ module VagrantPlugins
|
|||
Vagrant::Util::Subprocess.execute(*Shellwords.split(check_command)).exit_code == 0
|
||||
end
|
||||
|
||||
def self.modinfo_path
|
||||
if !defined?(@_modinfo_path)
|
||||
@_modinfo_path = Vagrant::Util::Which.which("modinfo")
|
||||
|
||||
if @_modinfo_path.to_s.empty?
|
||||
path = "/sbin/modinfo"
|
||||
if File.file?(path)
|
||||
@_modinfo_path = path
|
||||
end
|
||||
end
|
||||
|
||||
if @_modinfo_path.to_s.empty?
|
||||
@_modinfo_path = "modinfo"
|
||||
end
|
||||
end
|
||||
@_modinfo_path
|
||||
end
|
||||
|
||||
# @private
|
||||
# Reset the cached values for capability. This is not considered a public
|
||||
# API and should only be used for testing.
|
||||
|
|
|
@ -7,11 +7,11 @@ module VagrantPlugins
|
|||
end
|
||||
|
||||
def self.nfs_check_command(env)
|
||||
"/sbin/service nfsserver status"
|
||||
"systemctl status --no-pager nfs-server"
|
||||
end
|
||||
|
||||
def self.nfs_start_command(env)
|
||||
"/sbin/service nfsserver start"
|
||||
"systemctl start --no-pager nfs-server"
|
||||
end
|
||||
end
|
||||
end
|
||||
|
|
|
@ -0,0 +1,168 @@
|
|||
require "log4r"
|
||||
require "securerandom"
|
||||
|
||||
require "vagrant/util/numeric"
|
||||
|
||||
module VagrantPlugins
|
||||
module Kernel_V2
|
||||
class VagrantConfigDisk < Vagrant.plugin("2", :config)
|
||||
#-------------------------------------------------------------------
|
||||
# Config class for a given Disk
|
||||
#-------------------------------------------------------------------
|
||||
|
||||
DEFAULT_DISK_TYPES = [:disk, :dvd, :floppy].freeze
|
||||
|
||||
# Note: This value is for internal use only
|
||||
#
|
||||
# @return [String]
|
||||
attr_reader :id
|
||||
|
||||
# File name for the given disk. Defaults to a generated name that is:
|
||||
#
|
||||
# vagrant_<disk_type>_<short_uuid>
|
||||
#
|
||||
# @return [String]
|
||||
attr_accessor :name
|
||||
|
||||
# Type of disk to create. Defaults to `:disk`
|
||||
#
|
||||
# @return [Symbol]
|
||||
attr_accessor :type
|
||||
|
||||
# Size of disk to create
|
||||
#
|
||||
# @return [Integer,String]
|
||||
attr_accessor :size
|
||||
|
||||
# Path to the location of the disk file (Optional)
|
||||
#
|
||||
# @return [String]
|
||||
attr_accessor :file
|
||||
|
||||
# Determines if this disk is the _main_ disk, or an attachment.
|
||||
# Defaults to true.
|
||||
#
|
||||
# @return [Boolean]
|
||||
attr_accessor :primary
|
||||
|
||||
# Provider specific options
|
||||
#
|
||||
# @return [Hash]
|
||||
attr_accessor :provider_config
|
||||
|
||||
def initialize(type)
|
||||
@logger = Log4r::Logger.new("vagrant::config::vm::disk")
|
||||
|
||||
@type = type
|
||||
@provider_config = {}
|
||||
|
||||
@name = UNSET_VALUE
|
||||
@provider_type = UNSET_VALUE
|
||||
@size = UNSET_VALUE
|
||||
@primary = UNSET_VALUE
|
||||
@file = UNSET_VALUE
|
||||
|
||||
# Internal options
|
||||
@id = SecureRandom.uuid
|
||||
end
|
||||
|
||||
# Helper method for storing provider specific config options
|
||||
#
|
||||
# Expected format is:
|
||||
#
|
||||
# - `provider__diskoption: value`
|
||||
# - `{provider: {diskoption: value, otherdiskoption: value, ...}`
|
||||
#
|
||||
# Duplicates will be overriden
|
||||
#
|
||||
# @param [Hash] options
|
||||
def add_provider_config(**options, &block)
|
||||
current = {}
|
||||
options.each do |k,v|
|
||||
opts = k.to_s.split("__")
|
||||
|
||||
if opts.size == 2
|
||||
current[opts[0].to_sym] = {opts[1].to_sym => v}
|
||||
elsif v.is_a?(Hash)
|
||||
current[k] = v
|
||||
else
|
||||
@logger.warn("Disk option '#{k}' found that does not match expected provider disk config schema.")
|
||||
end
|
||||
end
|
||||
|
||||
current = @provider_config.merge(current) if !@provider_config.empty?
|
||||
@provider_config = current
|
||||
end
|
||||
|
||||
def finalize!
|
||||
# Ensure all config options are set to nil or default value if untouched
|
||||
# by user
|
||||
@type = :disk if @type == UNSET_VALUE
|
||||
@size = nil if @size == UNSET_VALUE
|
||||
@file = nil if @file == UNSET_VALUE
|
||||
|
||||
if @primary == UNSET_VALUE
|
||||
@primary = false
|
||||
end
|
||||
|
||||
if @name == UNSET_VALUE
|
||||
if @primary
|
||||
@name = "vagrant_primary"
|
||||
else
|
||||
@name = "name_#{@type.to_s}_#{@id.split("-").last}"
|
||||
end
|
||||
end
|
||||
|
||||
@provider_config = nil if @provider_config == {}
|
||||
end
|
||||
|
||||
# @return [Array] array of strings of error messages from config option validation
|
||||
def validate(machine)
|
||||
errors = _detected_errors
|
||||
|
||||
# validate type with list of known disk types
|
||||
|
||||
if !DEFAULT_DISK_TYPES.include?(@type)
|
||||
errors << I18n.t("vagrant.config.disk.invalid_type", type: @type,
|
||||
types: DEFAULT_DISK_TYPES.join(', '))
|
||||
end
|
||||
|
||||
if @size && !@size.is_a?(Integer)
|
||||
if @size.is_a?(String)
|
||||
@size = Vagrant::Util::Numeric.string_to_bytes(@size)
|
||||
end
|
||||
|
||||
if !@size
|
||||
errors << I18n.t("vagrant.config.disk.invalid_size", name: @name, machine: machine.name)
|
||||
end
|
||||
end
|
||||
|
||||
if @file
|
||||
if !@file.is_a?(String)
|
||||
errors << I18n.t("vagrant.config.disk.invalid_file_type", file: @file, machine: machine.name)
|
||||
elsif !File.file?(@file)
|
||||
errors << I18n.t("vagrant.config.disk.missing_file", file_path: @file,
|
||||
name: @name, machine: machine.name)
|
||||
end
|
||||
end
|
||||
|
||||
if @provider_config
|
||||
if !@provider_config.keys.include?(machine.provider_name)
|
||||
machine.env.ui.warn(I18n.t("vagrant.config.disk.missing_provider",
|
||||
machine: machine.name,
|
||||
provider_name: machine.provider_name))
|
||||
end
|
||||
end
|
||||
|
||||
errors
|
||||
end
|
||||
|
||||
# The String representation of this Disk.
|
||||
#
|
||||
# @return [String]
|
||||
def to_s
|
||||
"disk config"
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
|
@ -11,6 +11,7 @@ require "vagrant/util/experimental"
|
|||
|
||||
require File.expand_path("../vm_provisioner", __FILE__)
|
||||
require File.expand_path("../vm_subvm", __FILE__)
|
||||
require File.expand_path("../disk", __FILE__)
|
||||
|
||||
module VagrantPlugins
|
||||
module Kernel_V2
|
||||
|
@ -43,6 +44,7 @@ module VagrantPlugins
|
|||
attr_accessor :post_up_message
|
||||
attr_accessor :usable_port_range
|
||||
attr_reader :provisioners
|
||||
attr_reader :disks
|
||||
|
||||
# This is an experimental feature that isn't public yet.
|
||||
attr_accessor :clone
|
||||
|
@ -73,6 +75,7 @@ module VagrantPlugins
|
|||
@hostname = UNSET_VALUE
|
||||
@post_up_message = UNSET_VALUE
|
||||
@provisioners = []
|
||||
@disks = []
|
||||
@usable_port_range = UNSET_VALUE
|
||||
|
||||
# Internal state
|
||||
|
@ -123,6 +126,28 @@ module VagrantPlugins
|
|||
end
|
||||
end
|
||||
|
||||
# Merge defined disks
|
||||
other_disks = other.instance_variable_get(:@disks)
|
||||
new_disks = []
|
||||
@disks.each do |p|
|
||||
other_p = other_disks.find { |o| p.id == o.id }
|
||||
if other_p
|
||||
# there is an override. take it.
|
||||
other_p.config = p.config.merge(other_p.config)
|
||||
|
||||
# Remove duplicate disk config from other
|
||||
p = other_p
|
||||
other_disks.delete(other_p)
|
||||
end
|
||||
|
||||
# there is an override, merge it into the
|
||||
new_disks << p.dup
|
||||
end
|
||||
other_disks.each do |p|
|
||||
new_disks << p.dup
|
||||
end
|
||||
result.instance_variable_set(:@disks, new_disks)
|
||||
|
||||
# Merge the providers by prepending any configuration blocks we
|
||||
# have for providers onto the new configuration.
|
||||
other_providers = other.instance_variable_get(:@__providers)
|
||||
|
@ -384,6 +409,38 @@ module VagrantPlugins
|
|||
@__defined_vms[name].config_procs << [options[:config_version], block] if block
|
||||
end
|
||||
|
||||
# Stores disk config options from Vagrantfile
|
||||
#
|
||||
# @param [Symbol] type
|
||||
# @param [Hash] options
|
||||
# @param [Block] block
|
||||
def disk(type, **options, &block)
|
||||
disk_config = VagrantConfigDisk.new(type)
|
||||
|
||||
# Remove provider__option options before set_options, otherwise will
|
||||
# show up as missing setting
|
||||
# Extract provider hash options as well
|
||||
provider_options = {}
|
||||
options.delete_if do |p,o|
|
||||
if o.is_a?(Hash) || p.to_s.include?("__")
|
||||
provider_options[p] = o
|
||||
true
|
||||
end
|
||||
end
|
||||
|
||||
disk_config.set_options(options)
|
||||
|
||||
# Add provider config
|
||||
disk_config.add_provider_config(provider_options, &block)
|
||||
|
||||
if !Vagrant::Util::Experimental.feature_enabled?("disk_base_config")
|
||||
@logger.warn("Disk config defined, but experimental feature is not enabled. To use this feature, enable it with the experimental flag `disk_base_config`. Disk will not be added to internal config, and will be ignored.")
|
||||
return
|
||||
end
|
||||
|
||||
@disks << disk_config
|
||||
end
|
||||
|
||||
#-------------------------------------------------------------------
|
||||
# Internal methods, don't call these.
|
||||
#-------------------------------------------------------------------
|
||||
|
@ -509,7 +566,7 @@ module VagrantPlugins
|
|||
|
||||
line = "(unknown)"
|
||||
if e.backtrace && e.backtrace[0]
|
||||
line = e.backtrace[0].split(":")[1]
|
||||
line = e.backtrace.first.slice(0, e.backtrace.first.rindex(':')).rpartition(':').last
|
||||
end
|
||||
|
||||
raise Vagrant::Errors::VagrantfileLoadError,
|
||||
|
@ -547,6 +604,10 @@ module VagrantPlugins
|
|||
end
|
||||
end
|
||||
|
||||
@disks.each do |d|
|
||||
d.finalize!
|
||||
end
|
||||
|
||||
if !current_dir_shared && !@__synced_folders["/vagrant"]
|
||||
synced_folder(".", "/vagrant")
|
||||
end
|
||||
|
@ -609,7 +670,7 @@ module VagrantPlugins
|
|||
errors << I18n.t("vagrant.config.vm.clone_and_box")
|
||||
end
|
||||
|
||||
errors << I18n.t("vagrant.config.vm.hostname_invalid_characters") if \
|
||||
errors << I18n.t("vagrant.config.vm.hostname_invalid_characters", name: machine.name) if \
|
||||
@hostname && @hostname !~ /^[a-z0-9][-.a-z0-9]*$/i
|
||||
|
||||
if @box_version
|
||||
|
@ -748,6 +809,26 @@ module VagrantPlugins
|
|||
end
|
||||
end
|
||||
|
||||
# Validate disks
|
||||
# Check if there is more than one primrary disk defined and throw an error
|
||||
primary_disks = @disks.select { |d| d.primary && d.type == :disk }
|
||||
if primary_disks.size > 1
|
||||
errors << I18n.t("vagrant.config.vm.multiple_primary_disks_error",
|
||||
name: machine.name)
|
||||
end
|
||||
|
||||
disk_names = @disks.map { |d| d.name }
|
||||
duplicate_names = disk_names.detect{ |d| disk_names.count(d) > 1 }
|
||||
if duplicate_names && duplicate_names.size
|
||||
errors << I18n.t("vagrant.config.vm.multiple_disk_names_error",
|
||||
name: duplicate_names)
|
||||
end
|
||||
|
||||
@disks.each do |d|
|
||||
error = d.validate(machine)
|
||||
errors.concat error if !error.empty?
|
||||
end
|
||||
|
||||
# We're done with VM level errors so prepare the section
|
||||
errors = { "vm" => errors }
|
||||
|
||||
|
|
|
@ -9,7 +9,10 @@ module VagrantPlugins
|
|||
|
||||
# Unique name for this provisioner
|
||||
#
|
||||
# @return [String]
|
||||
# Accepts a string, but is ultimately forced into a symbol in the top level method inside
|
||||
# #Config::VM.provision method while being parsed from a Vagrantfile
|
||||
#
|
||||
# @return [Symbol]
|
||||
attr_reader :name
|
||||
|
||||
# Internal unique name for this provisioner
|
||||
|
|
|
@ -19,7 +19,7 @@ module VagrantPlugins
|
|||
build_dir ||= machine.provider_config.build_dir
|
||||
git_repo = env[:git_repo]
|
||||
git_repo ||= machine.provider_config.git_repo
|
||||
|
||||
|
||||
# If we're not building a container, then just skip this step
|
||||
return @app.call(env) if (!build_dir && !git_repo)
|
||||
|
||||
|
|
|
@ -34,7 +34,7 @@ module VagrantPlugins
|
|||
# @return [Array<Socket::Ifaddr>] interface list
|
||||
def list_interfaces
|
||||
Socket.getifaddrs.find_all do |i|
|
||||
i.addr.ip? && !i.addr.ipv4_loopback? &&
|
||||
!i.addr.nil? && i.addr.ip? && !i.addr.ipv4_loopback? &&
|
||||
!i.addr.ipv6_loopback? && !i.addr.ipv6_linklocal?
|
||||
end
|
||||
end
|
||||
|
@ -153,6 +153,10 @@ module VagrantPlugins
|
|||
|
||||
# Generate configuration for public network
|
||||
#
|
||||
# TODO: When the Vagrant installer upgrades to Ruby 2.5.x,
|
||||
# remove all instances of the roundabout way of determining a prefix
|
||||
# and instead just use the built-in `.prefix` method
|
||||
#
|
||||
# @param [Hash] root_options Root networking options
|
||||
# @param [Hash] net_options Docker scoped networking options
|
||||
# @param [Hash] env Local call env
|
||||
|
@ -191,7 +195,9 @@ module VagrantPlugins
|
|||
base_opts[:opt] = "parent=#{bridge_interface.name}"
|
||||
subnet = IPAddr.new(bridge_interface.addr.ip_address <<
|
||||
"/" << bridge_interface.netmask.ip_unpack.first)
|
||||
base_opts[:subnet] = "#{subnet}/#{subnet.prefix}"
|
||||
netmask = bridge_interface.netmask.ip_unpack.first
|
||||
prefix = IPAddr.new("255.255.255.255/#{netmask}").to_i.to_s(2).count("1")
|
||||
base_opts[:subnet] = "#{subnet}/#{prefix}"
|
||||
subnet_addr = IPAddr.new(base_opts[:subnet])
|
||||
base_opts[:driver] = "macvlan"
|
||||
base_opts[:gateway] = subnet_addr.succ.to_s
|
||||
|
@ -213,7 +219,7 @@ module VagrantPlugins
|
|||
network_options, bridge_interface.name, env)
|
||||
end
|
||||
network_options[:ip_range] = request_public_iprange(
|
||||
network_options, bridge_interface.name, env)
|
||||
network_options, bridge_interface, env)
|
||||
end
|
||||
end
|
||||
[network_name, network_options]
|
||||
|
@ -257,8 +263,12 @@ module VagrantPlugins
|
|||
# Request the IP range allowed for use by docker when creating a new
|
||||
# public network
|
||||
#
|
||||
# TODO: When the Vagrant installer upgrades to Ruby 2.5.x,
|
||||
# remove all instances of the roundabout way of determining a prefix
|
||||
# and instead just use the built-in `.prefix` method
|
||||
#
|
||||
# @param [Hash] network_options Docker scoped networking options
|
||||
# @param [String] interface The bridge interface used
|
||||
# @param [Socket::Ifaddr] interface The bridge interface used
|
||||
# @param [Hash] env Local call env
|
||||
# @return [String] Address range
|
||||
def request_public_iprange(network_options, interface, env)
|
||||
|
@ -272,7 +282,7 @@ module VagrantPlugins
|
|||
while !range
|
||||
range = env[:ui].ask(I18n.t(
|
||||
"docker_provider.network_bridge_iprange_request",
|
||||
interface: interface,
|
||||
interface: interface.name,
|
||||
default_range: network_options[:subnet]) + " ",
|
||||
prefix: false
|
||||
).strip
|
||||
|
@ -282,11 +292,12 @@ module VagrantPlugins
|
|||
begin
|
||||
range = IPAddr.new(range)
|
||||
if !subnet.include?(range)
|
||||
puts "we in here"
|
||||
netmask = interface.netmask.ip_unpack.first
|
||||
prefix = IPAddr.new("255.255.255.255/#{netmask}").to_i.to_s(2).count("1")
|
||||
env[:ui].warn(I18n.t(
|
||||
"docker_provider.network_bridge_iprange_outofbounds",
|
||||
subnet: network_options[:subnet],
|
||||
range: "#{range}/#{range.prefix}"
|
||||
range: "#{range}/#{prefix}"
|
||||
) + "\n", prefix: false)
|
||||
range = nil
|
||||
end
|
||||
|
@ -297,7 +308,10 @@ module VagrantPlugins
|
|||
range = nil
|
||||
end
|
||||
end
|
||||
"#{range}/#{range.prefix}"
|
||||
|
||||
netmask = interface.netmask.ip_unpack.first
|
||||
prefix = IPAddr.new("255.255.255.255/#{netmask}").to_i.to_s(2).count("1")
|
||||
"#{range}/#{prefix}"
|
||||
end
|
||||
|
||||
# Execute the action
|
||||
|
|
|
@ -15,19 +15,31 @@ module VagrantPlugins
|
|||
@executor = Executor::Local.new
|
||||
end
|
||||
|
||||
# Returns the id for a new container built from `docker build`. Raises
|
||||
# an exception if the id was unable to be captured from the output
|
||||
#
|
||||
# @return [String] id - ID matched from the docker build output.
|
||||
def build(dir, **opts, &block)
|
||||
args = Array(opts[:extra_args])
|
||||
args << dir
|
||||
result = execute('docker', 'build', *args, &block)
|
||||
matches = result.scan(/Successfully built (.+)$/i)
|
||||
if matches.empty?
|
||||
# This will cause a stack trace in Vagrant, but it is a bug
|
||||
# if this happens anyways.
|
||||
raise "UNKNOWN OUTPUT: #{result}"
|
||||
args = Array(opts[:extra_args])
|
||||
args << dir
|
||||
opts = {with_stderr: true}
|
||||
result = execute('docker', 'build', *args, opts, &block)
|
||||
matches = result.match(/Successfully built (?<id>.+)$/i)
|
||||
if !matches
|
||||
# Check for the new output format 'writing image sha256...'
|
||||
# In this case, docker builtkit is enabled. Its format is different
|
||||
# from standard docker
|
||||
@logger.warn("Could not determine docker container ID. Scanning for buildkit output instead")
|
||||
matches = result.match(/writing image .+:(?<id>[0-9a-z]+) done/i)
|
||||
if !matches
|
||||
# This will cause a stack trace in Vagrant, but it is a bug
|
||||
# if this happens anyways.
|
||||
raise Errors::BuildError, result: result
|
||||
end
|
||||
end
|
||||
|
||||
# Return the last match, and the capture of it
|
||||
matches[-1][0]
|
||||
# Return the matched group `id`
|
||||
matches[:id]
|
||||
end
|
||||
|
||||
def create(params, **opts, &block)
|
||||
|
|
|
@ -34,6 +34,12 @@ module VagrantPlugins
|
|||
@logger.debug("Data directory for composition file `#{@data_directory}`")
|
||||
end
|
||||
|
||||
# Updates the docker compose config file with the given arguments
|
||||
#
|
||||
# @param [String] dir - local directory or git repo URL
|
||||
# @param [Hash] opts - valid key: extra_args
|
||||
# @param [Block] block
|
||||
# @return [Nil]
|
||||
def build(dir, **opts, &block)
|
||||
name = machine.name.to_s
|
||||
@logger.debug("Applying build for `#{name}` using `#{dir}` directory.")
|
||||
|
@ -47,26 +53,26 @@ module VagrantPlugins
|
|||
services[name]["build"]["dockerfile"] = opts[:extra_args][opts[:extra_args].index("--file") + 1]
|
||||
end
|
||||
# Extract any build args that can be found
|
||||
case opts[:build_args]
|
||||
case opts[:extra_args]
|
||||
when Array
|
||||
if opts[:build_args].include?("--build-arg")
|
||||
if opts[:extra_args].include?("--build-arg")
|
||||
idx = 0
|
||||
build_args = {}
|
||||
while(idx < opts[:build_args].size)
|
||||
arg_value = opts[:build_args][idx]
|
||||
extra_args = {}
|
||||
while(idx < opts[:extra_args].size)
|
||||
arg_value = opts[:extra_args][idx]
|
||||
idx += 1
|
||||
if arg_value.start_with?("--build-arg")
|
||||
if !arg_value.include?("=")
|
||||
arg_value = opts[:build_args][idx]
|
||||
arg_value = opts[:extra_args][idx]
|
||||
idx += 1
|
||||
end
|
||||
key, val = arg_value.to_s.split("=", 2).to_s.split("=")
|
||||
build_args[key] = val
|
||||
extra_args[key] = val
|
||||
end
|
||||
end
|
||||
end
|
||||
when Hash
|
||||
services[name]["build"]["args"] = opts[:build_args]
|
||||
services[name]["build"]["args"] = opts[:extra_args]
|
||||
end
|
||||
end
|
||||
rescue => error
|
||||
|
|
|
@ -5,6 +5,10 @@ module VagrantPlugins
|
|||
error_namespace("docker_provider.errors")
|
||||
end
|
||||
|
||||
class BuildError < DockerError
|
||||
error_key(:build_error)
|
||||
end
|
||||
|
||||
class CommunicatorNonDocker < DockerError
|
||||
error_key(:communicator_non_docker)
|
||||
end
|
||||
|
|
|
@ -27,7 +27,13 @@ module VagrantPlugins
|
|||
stdout: result.stdout
|
||||
end
|
||||
|
||||
result.stdout
|
||||
if opts
|
||||
if opts[:with_stderr]
|
||||
return result.stdout + " " + result.stderr
|
||||
else
|
||||
return result.stdout
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
def windows?
|
||||
|
|
|
@ -11,6 +11,14 @@ module VagrantPlugins
|
|||
class Provider < Vagrant.plugin("2", :provider)
|
||||
@@host_vm_mutex = Mutex.new
|
||||
|
||||
def self.usable?(raise_error=false)
|
||||
Driver.new.execute("docker", "version")
|
||||
true
|
||||
rescue Vagrant::Errors::CommandUnavailable, Errors::ExecuteError
|
||||
raise if raise_error
|
||||
return false
|
||||
end
|
||||
|
||||
def initialize(machine)
|
||||
@logger = Log4r::Logger.new("vagrant::provider::docker")
|
||||
@machine = machine
|
||||
|
@ -45,14 +53,6 @@ module VagrantPlugins
|
|||
@driver
|
||||
end
|
||||
|
||||
def usable?(raise_error=false)
|
||||
driver.execute("docker", "version")
|
||||
true
|
||||
rescue Vagrant::Errors::CommandUnavailable, Errors::ExecuteError
|
||||
raise if raise_error
|
||||
return false
|
||||
end
|
||||
|
||||
# This returns the {Vagrant::Machine} that is our host machine.
|
||||
# It does not perform any action on the machine or verify it is
|
||||
# running.
|
||||
|
|
|
@ -355,6 +355,9 @@ function Report-ErrorVagrantVMImport {
|
|||
)
|
||||
|
||||
$ManagementService = Get-WmiObject -Namespace 'root\virtualization\v2' -Class 'Msvm_VirtualSystemManagementService'
|
||||
if($null -eq $ManagementService) {
|
||||
throw 'The Hyper-V Virtual Machine Management Service (VMMS) is not running.'
|
||||
}
|
||||
|
||||
# Relative path names will fail when attempting to import a system
|
||||
# definition so always ensure we are using the full path to the
|
||||
|
|
|
@ -79,6 +79,7 @@ module VagrantPlugins
|
|||
b.use ForwardPorts
|
||||
b.use SetHostname
|
||||
b.use SaneDefaults
|
||||
b.use Disk
|
||||
b.use Customize, "pre-boot"
|
||||
b.use Boot
|
||||
b.use Customize, "post-boot"
|
||||
|
|
|
@ -13,7 +13,16 @@ module VagrantPlugins
|
|||
end
|
||||
|
||||
def call(env)
|
||||
clean_machine_folder(env[:machine].provider.driver.read_machine_folder)
|
||||
machine_folder = env[:machine].provider.driver.read_machine_folder
|
||||
|
||||
begin
|
||||
clean_machine_folder(machine_folder)
|
||||
rescue Errno::EPERM
|
||||
raise Vagrant::Errors::MachineFolderNotAccessible,
|
||||
name: env[:machine].name,
|
||||
path: machine_folder
|
||||
end
|
||||
|
||||
@app.call(env)
|
||||
end
|
||||
|
||||
|
|
|
@ -64,6 +64,7 @@ module VagrantPlugins
|
|||
"5.1" => Version_5_1,
|
||||
"5.2" => Version_5_2,
|
||||
"6.0" => Version_6_0,
|
||||
"6.1" => Version_6_1,
|
||||
}
|
||||
|
||||
if @@version.start_with?("4.2.14")
|
||||
|
|
|
@ -0,0 +1,16 @@
|
|||
require File.expand_path("../version_6_0", __FILE__)
|
||||
|
||||
module VagrantPlugins
|
||||
module ProviderVirtualBox
|
||||
module Driver
|
||||
# Driver for VirtualBox 6.1.x
|
||||
class Version_6_1 < Version_6_0
|
||||
def initialize(uuid)
|
||||
super
|
||||
|
||||
@logger = Log4r::Logger.new("vagrant::provider::virtualbox_6_1")
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
|
@ -59,6 +59,7 @@ module VagrantPlugins
|
|||
autoload :Version_5_1, File.expand_path("../driver/version_5_1", __FILE__)
|
||||
autoload :Version_5_2, File.expand_path("../driver/version_5_2", __FILE__)
|
||||
autoload :Version_6_0, File.expand_path("../driver/version_6_0", __FILE__)
|
||||
autoload :Version_6_1, File.expand_path("../driver/version_6_1", __FILE__)
|
||||
end
|
||||
|
||||
module Model
|
||||
|
|
|
@ -8,9 +8,10 @@ module VagrantPlugins
|
|||
def self.installed?
|
||||
Driver::Meta.new
|
||||
true
|
||||
rescue Vagrant::Errors::VirtualBoxInvalidVersion
|
||||
return false
|
||||
rescue Vagrant::Errors::VirtualBoxNotDetected
|
||||
rescue Vagrant::Errors::VirtualBoxInvalidVersion,
|
||||
Vagrant::Errors::VirtualBoxNotDetected,
|
||||
Vagrant::Errors::VirtualBoxKernelModuleNotLoaded,
|
||||
Vagrant::Errors::VirtualBoxInstallIncomplete
|
||||
return false
|
||||
end
|
||||
|
||||
|
@ -19,10 +20,10 @@ module VagrantPlugins
|
|||
# version and all that, which checks for VirtualBox being present
|
||||
Driver::Meta.new
|
||||
true
|
||||
rescue Vagrant::Errors::VirtualBoxInvalidVersion
|
||||
raise if raise_error
|
||||
return false
|
||||
rescue Vagrant::Errors::VirtualBoxNotDetected
|
||||
rescue Vagrant::Errors::VirtualBoxInvalidVersion,
|
||||
Vagrant::Errors::VirtualBoxNotDetected,
|
||||
Vagrant::Errors::VirtualBoxKernelModuleNotLoaded,
|
||||
Vagrant::Errors::VirtualBoxInstallIncomplete
|
||||
raise if raise_error
|
||||
return false
|
||||
end
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
require_relative "../../../errors"
|
||||
require_relative "../pip/pip"
|
||||
|
||||
module VagrantPlugins
|
||||
module Ansible
|
||||
|
@ -7,15 +8,31 @@ module VagrantPlugins
|
|||
module Arch
|
||||
module AnsibleInstall
|
||||
|
||||
def self.ansible_install(machine, install_mode, ansible_version, pip_args)
|
||||
if install_mode != :default
|
||||
raise Ansible::Errors::AnsiblePipInstallIsNotSupported
|
||||
def self.ansible_install(machine, install_mode, ansible_version, pip_args, pip_install_cmd = "")
|
||||
case install_mode
|
||||
when :pip
|
||||
pip_setup machine, pip_install_cmd
|
||||
Pip::pip_install machine, "ansible", ansible_version, pip_args, true
|
||||
|
||||
when :pip_args_only
|
||||
pip_setup machine, pip_install_cmd
|
||||
Pip::pip_install machine, "", "", pip_args, false
|
||||
|
||||
else
|
||||
machine.communicate.sudo "pacman -Syy --noconfirm"
|
||||
machine.communicate.sudo "pacman -S --noconfirm ansible"
|
||||
end
|
||||
end
|
||||
|
||||
private
|
||||
|
||||
def self.pip_setup(machine, pip_install_cmd = "")
|
||||
machine.communicate.sudo "pacman -Syy --noconfirm"
|
||||
machine.communicate.sudo "pacman -S --noconfirm base-devel curl git python"
|
||||
|
||||
Pip::get_pip machine, pip_install_cmd
|
||||
end
|
||||
|
||||
end
|
||||
end
|
||||
end
|
||||
|
|
|
@ -7,8 +7,7 @@ module VagrantPlugins
|
|||
module Debian
|
||||
module AnsibleInstall
|
||||
|
||||
|
||||
def self.ansible_install(machine, install_mode, ansible_version, pip_args, pip_install_cmd="")
|
||||
def self.ansible_install(machine, install_mode, ansible_version, pip_args, pip_install_cmd = "")
|
||||
case install_mode
|
||||
when :pip
|
||||
pip_setup machine, pip_install_cmd
|
||||
|
@ -33,12 +32,12 @@ INLINE_CRIPT
|
|||
|
||||
machine.communicate.sudo install_backports_if_wheezy_release
|
||||
machine.communicate.sudo "apt-get update -y -qq"
|
||||
machine.communicate.sudo "apt-get install -y -qq ansible"
|
||||
machine.communicate.sudo "DEBIAN_FRONTEND=noninteractive apt-get install -y -qq --option \"Dpkg::Options::=--force-confold\" ansible"
|
||||
end
|
||||
|
||||
def self.pip_setup(machine, pip_install_cmd="")
|
||||
def self.pip_setup(machine, pip_install_cmd = "")
|
||||
machine.communicate.sudo "apt-get update -y -qq"
|
||||
machine.communicate.sudo "apt-get install -y -qq build-essential curl git libssl-dev libffi-dev python-dev"
|
||||
machine.communicate.sudo "DEBIAN_FRONTEND=noninteractive apt-get install -y -qq --option \"Dpkg::Options::=--force-confold\" build-essential curl git libssl-dev libffi-dev python-dev"
|
||||
Pip::get_pip machine, pip_install_cmd
|
||||
end
|
||||
|
||||
|
|
|
@ -8,7 +8,7 @@ module VagrantPlugins
|
|||
module Fedora
|
||||
module AnsibleInstall
|
||||
|
||||
def self.ansible_install(machine, install_mode, ansible_version, pip_args, pip_install_cmd="")
|
||||
def self.ansible_install(machine, install_mode, ansible_version, pip_args, pip_install_cmd = "")
|
||||
case install_mode
|
||||
when :pip
|
||||
pip_setup machine, pip_install_cmd
|
||||
|
@ -25,7 +25,7 @@ module VagrantPlugins
|
|||
|
||||
private
|
||||
|
||||
def self.pip_setup(machine, pip_install_cmd="")
|
||||
def self.pip_setup(machine, pip_install_cmd = "")
|
||||
rpm_package_manager = Facts::rpm_package_manager(machine)
|
||||
|
||||
machine.communicate.sudo "#{rpm_package_manager} install -y curl gcc gmp-devel libffi-devel openssl-devel python-crypto python-devel python-dnf python-setuptools redhat-rpm-config"
|
||||
|
|
|
@ -7,11 +7,11 @@ module VagrantPlugins
|
|||
module FreeBSD
|
||||
module AnsibleInstall
|
||||
|
||||
def self.ansible_install(machine, install_mode, ansible_version, pip_args)
|
||||
def self.ansible_install(machine, install_mode, ansible_version, pip_args, pip_install_cmd = "")
|
||||
if install_mode != :default
|
||||
raise Ansible::Errors::AnsiblePipInstallIsNotSupported
|
||||
else
|
||||
machine.communicate.sudo "yes | pkg install ansible"
|
||||
machine.communicate.sudo "pkg install -qy py36-ansible"
|
||||
end
|
||||
end
|
||||
|
||||
|
|
|
@ -16,20 +16,23 @@ module VagrantPlugins
|
|||
end
|
||||
|
||||
args_array = [pip_args, upgrade_arg, "#{package}#{version_arg}"]
|
||||
args_array.reject! { |a| a.nil? || a.empty? }
|
||||
|
||||
machine.communicate.sudo "pip install #{args_array.join(' ')}"
|
||||
pip_install = "pip install"
|
||||
pip_install += " #{args_array.join(' ')}" unless args_array.empty?
|
||||
|
||||
machine.communicate.sudo pip_install
|
||||
end
|
||||
|
||||
def self.get_pip(machine, pip_install_cmd=DEFAULT_PIP_INSTALL_CMD)
|
||||
|
||||
def self.get_pip(machine, pip_install_cmd = DEFAULT_PIP_INSTALL_CMD)
|
||||
# The objective here is to get pip either by default
|
||||
# or by the argument passed in. The objective is not
|
||||
# or by the argument passed in. The objective is not
|
||||
# to circumvent the pip setup by passing in nothing.
|
||||
# Thus, we stick with the default on an empty string.
|
||||
# Typecast added in the check for safety.
|
||||
|
||||
if pip_install_cmd.to_s.empty?
|
||||
pip_install_cmd=DEFAULT_PIP_INSTALL_CMD
|
||||
pip_install_cmd = DEFAULT_PIP_INSTALL_CMD
|
||||
end
|
||||
|
||||
machine.ui.detail I18n.t("vagrant.provisioners.ansible.installing_pip")
|
||||
|
|
|
@ -8,7 +8,7 @@ module VagrantPlugins
|
|||
module RedHat
|
||||
module AnsibleInstall
|
||||
|
||||
def self.ansible_install(machine, install_mode, ansible_version, pip_args, pip_install_cmd="")
|
||||
def self.ansible_install(machine, install_mode, ansible_version, pip_args, pip_install_cmd = "")
|
||||
case install_mode
|
||||
when :pip
|
||||
pip_setup machine, pip_install_cmd
|
||||
|
@ -33,7 +33,7 @@ module VagrantPlugins
|
|||
machine.communicate.sudo "#{rpm_package_manager} -y --enablerepo=epel install ansible"
|
||||
end
|
||||
|
||||
def self.pip_setup(machine, pip_install_cmd="")
|
||||
def self.pip_setup(machine, pip_install_cmd = "")
|
||||
rpm_package_manager = Facts::rpm_package_manager(machine)
|
||||
|
||||
machine.communicate.sudo("#{rpm_package_manager} -y install curl gcc libffi-devel openssl-devel python-crypto python-devel python-setuptools")
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
require_relative "../../../errors"
|
||||
|
||||
module VagrantPlugins
|
||||
module Ansible
|
||||
|
@ -6,7 +7,7 @@ module VagrantPlugins
|
|||
module SUSE
|
||||
module AnsibleInstall
|
||||
|
||||
def self.ansible_install(machine, install_mode, ansible_version, pip_args)
|
||||
def self.ansible_install(machine, install_mode, ansible_version, pip_args, pip_install_cmd = "")
|
||||
if install_mode != :default
|
||||
raise Ansible::Errors::AnsiblePipInstallIsNotSupported
|
||||
else
|
||||
|
|
|
@ -7,7 +7,7 @@ module VagrantPlugins
|
|||
module Ubuntu
|
||||
module AnsibleInstall
|
||||
|
||||
def self.ansible_install(machine, install_mode, ansible_version, pip_args, pip_install_cmd="")
|
||||
def self.ansible_install(machine, install_mode, ansible_version, pip_args, pip_install_cmd = "")
|
||||
if install_mode != :default
|
||||
Debian::AnsibleInstall::ansible_install machine, install_mode, ansible_version, pip_args, pip_install_cmd
|
||||
else
|
||||
|
@ -21,13 +21,13 @@ module VagrantPlugins
|
|||
unless machine.communicate.test("test -x \"$(which add-apt-repository)\"")
|
||||
machine.communicate.sudo """
|
||||
apt-get update -y -qq && \
|
||||
apt-get install -y -qq software-properties-common
|
||||
DEBIAN_FRONTEND=noninteractive apt-get install -y -qq software-properties-common --option \"Dpkg::Options::=--force-confold\"
|
||||
"""
|
||||
end
|
||||
machine.communicate.sudo """
|
||||
add-apt-repository ppa:ansible/ansible -y && \
|
||||
apt-get update -y -qq && \
|
||||
apt-get install -y -qq ansible
|
||||
DEBIAN_FRONTEND=noninteractive apt-get install -y -qq ansible --option \"Dpkg::Options::=--force-confold\"
|
||||
"""
|
||||
end
|
||||
|
||||
|
|
|
@ -19,7 +19,7 @@ module VagrantPlugins
|
|||
@install = UNSET_VALUE
|
||||
@install_mode = UNSET_VALUE
|
||||
@pip_args = UNSET_VALUE
|
||||
@pip_install_cmd = UNSET_VALUE
|
||||
@pip_install_cmd = UNSET_VALUE
|
||||
@provisioning_path = UNSET_VALUE
|
||||
@tmp_path = UNSET_VALUE
|
||||
end
|
||||
|
@ -30,7 +30,7 @@ module VagrantPlugins
|
|||
@install = true if @install == UNSET_VALUE
|
||||
@install_mode = :default if @install_mode == UNSET_VALUE
|
||||
@pip_args = "" if @pip_args == UNSET_VALUE
|
||||
@pip_install_cmd = "" if @pip_args == UNSET_VALUE
|
||||
@pip_install_cmd = "" if @pip_install_cmd == UNSET_VALUE
|
||||
@provisioning_path = "/vagrant" if provisioning_path == UNSET_VALUE
|
||||
@tmp_path = "/tmp/vagrant-ansible" if tmp_path == UNSET_VALUE
|
||||
end
|
||||
|
|
|
@ -33,7 +33,7 @@ module VagrantPlugins
|
|||
# Current limitations:
|
||||
# - The installation of a specific Ansible version is only supported by
|
||||
# the "pip" install_mode. Note that "pip" installation also takes place
|
||||
# via a default command. If pip needs to be installed differently then
|
||||
# via a default command. If pip needs to be installed differently then
|
||||
# the command can be overwritten by supplying "pip_install_cmd" in the
|
||||
# config settings.
|
||||
# - There is no absolute guarantee that the automated installation will replace
|
||||
|
|
|
@ -6,7 +6,7 @@ module VagrantPlugins
|
|||
module FreeBSD
|
||||
module ChefInstall
|
||||
def self.chef_install(machine, project, version, channel, omnibus_url, options = {})
|
||||
machine.communicate.sudo("pkg install -y -qq curl bash")
|
||||
machine.communicate.sudo("pkg install -qy curl bash")
|
||||
|
||||
command = Omnibus.sh_command(project, version, channel, omnibus_url, options)
|
||||
machine.communicate.sudo(command)
|
||||
|
|
|
@ -3,7 +3,7 @@ module VagrantPlugins
|
|||
class Provisioner < Vagrant.plugin("2", :provisioner)
|
||||
def provision
|
||||
@machine.communicate.tap do |comm|
|
||||
source = File.expand_path(config.source)
|
||||
source = File.expand_path(config.source, @machine.env.cwd)
|
||||
destination = expand_guest_path(config.destination)
|
||||
|
||||
# If the source is a directory determine if any path modifications
|
||||
|
|
|
@ -7,6 +7,9 @@ module VagrantPlugins
|
|||
attr_accessor :path
|
||||
attr_accessor :md5
|
||||
attr_accessor :sha1
|
||||
attr_accessor :sha256
|
||||
attr_accessor :sha384
|
||||
attr_accessor :sha512
|
||||
attr_accessor :env
|
||||
attr_accessor :upload_path
|
||||
attr_accessor :args
|
||||
|
@ -26,6 +29,9 @@ module VagrantPlugins
|
|||
@path = UNSET_VALUE
|
||||
@md5 = UNSET_VALUE
|
||||
@sha1 = UNSET_VALUE
|
||||
@sha256 = UNSET_VALUE
|
||||
@sha384 = UNSET_VALUE
|
||||
@sha512 = UNSET_VALUE
|
||||
@env = UNSET_VALUE
|
||||
@upload_path = UNSET_VALUE
|
||||
@privileged = UNSET_VALUE
|
||||
|
@ -45,6 +51,9 @@ module VagrantPlugins
|
|||
@path = nil if @path == UNSET_VALUE
|
||||
@md5 = nil if @md5 == UNSET_VALUE
|
||||
@sha1 = nil if @sha1 == UNSET_VALUE
|
||||
@sha256 = nil if @sha256 == UNSET_VALUE
|
||||
@sha384 = nil if @sha384 == UNSET_VALUE
|
||||
@sha512 = nil if @sha512 == UNSET_VALUE
|
||||
@env = {} if @env == UNSET_VALUE
|
||||
@upload_path = "/tmp/vagrant-shell" if @upload_path == UNSET_VALUE
|
||||
@privileged = true if @privileged == UNSET_VALUE
|
||||
|
|
|
@ -253,7 +253,10 @@ module VagrantPlugins
|
|||
config.path,
|
||||
download_path,
|
||||
md5: config.md5,
|
||||
sha1: config.sha1
|
||||
sha1: config.sha1,
|
||||
sha256: config.sha256,
|
||||
sha384: config.sha384,
|
||||
sha512: config.sha512
|
||||
).download!
|
||||
ext = File.extname(config.path)
|
||||
script = download_path.read
|
||||
|
|
|
@ -30,6 +30,10 @@ module VagrantPlugins
|
|||
exclude = exclude[1..-1]
|
||||
end
|
||||
|
||||
exclude = "#{exclude}/" if !exclude.end_with?("/")
|
||||
exclude = "^#{exclude}"
|
||||
exclude += ".*" if !start_anchor
|
||||
|
||||
# This is not an ideal solution, but it's a start. We can improve and
|
||||
# keep unit tests passing in the future.
|
||||
exclude = exclude.gsub("**", "|||GLOBAL|||")
|
||||
|
|
|
@ -70,7 +70,7 @@ Vagrant.configure("2") do |config|
|
|||
# information on available options.
|
||||
|
||||
# Enable provisioning with a shell script. Additional provisioners such as
|
||||
# Puppet, Chef, Ansible, Salt, and Docker are also available. Please see the
|
||||
# Ansible, Chef, Docker, Puppet and Salt are also available. Please see the
|
||||
# documentation for more information about their specific syntax and use.
|
||||
# config.vm.provision "shell", inline: <<-SHELL
|
||||
# apt-get update
|
||||
|
|
|
@ -578,7 +578,7 @@ en:
|
|||
The specified checksum type is not supported by Vagrant: %{type}.
|
||||
Vagrant supports the following checksum types:
|
||||
|
||||
md5, sha1, sha256
|
||||
%{types}
|
||||
box_checksum_mismatch: |-
|
||||
The checksum of the downloaded box did not match the expected
|
||||
value. Please verify that you have the proper URL setup and that
|
||||
|
@ -964,6 +964,15 @@ en:
|
|||
If you believe this message is in error, please check the process
|
||||
listing for any "ruby" or "vagrant" processes and kill them. Then
|
||||
try again.
|
||||
machine_folder_not_accessible: |-
|
||||
Vagrant attempted to clean the machine folder for the machine '%{name}'
|
||||
but does not have permission to read the following path:
|
||||
|
||||
%{path}
|
||||
|
||||
Please ensure that Vagrant has the proper permissions to access the path
|
||||
above. You may need to grant this permission to the terminal emulator
|
||||
running Vagrant as well.
|
||||
machine_guest_not_ready: |-
|
||||
Guest-specific operations were attempted on a machine that is not
|
||||
ready for guest communication. This should not happen and a bug
|
||||
|
@ -1792,6 +1801,17 @@ en:
|
|||
# Translations for config validation errors
|
||||
#-------------------------------------------------------------------------------
|
||||
config:
|
||||
disk:
|
||||
invalid_type: |-
|
||||
Disk type '%{type}' is not a valid type. Please pick one of the following supported disk types: %{types}
|
||||
invalid_size: |-
|
||||
Config option 'size' for disk '%{name}' on guest '%{machine}' is not an integer
|
||||
invalid_file_type: |-
|
||||
Disk config option 'file' for '%{machine}' is not a string.
|
||||
missing_file: |-
|
||||
Disk file '%{file_path}' for disk '%{name}' on machine '%{machine}' does not exist.
|
||||
missing_provider: |-
|
||||
Guest '%{machine}' using provider '%{provider_name}' has provider specific config options for a provider other than '%{provider_name}'. These provider config options will be ignored for this guest
|
||||
common:
|
||||
bad_field: "The following settings shouldn't exist: %{fields}"
|
||||
chef:
|
||||
|
@ -1888,10 +1908,14 @@ en:
|
|||
box_missing: "A box must be specified."
|
||||
clone_and_box: "Only one of clone or box can be specified."
|
||||
hostname_invalid_characters: |-
|
||||
The hostname set for the VM should only contain letters, numbers,
|
||||
The hostname set for the VM '%{name}' should only contain letters, numbers,
|
||||
hyphens or dots. It cannot start with a hyphen or dot.
|
||||
ignore_provider_config: |-
|
||||
Ignoring provider config for validation...
|
||||
multiple_primary_disks_error: |-
|
||||
There are more than one primary disks defined for guest '%{name}'. Please ensure that only one disk has been defined as a primary disk.
|
||||
multiple_disk_names_error: |-
|
||||
Duplicate disk names defined: '%{name}'. Disk names must be unique.
|
||||
name_invalid: |-
|
||||
The sub-VM name '%{name}' is invalid. Please don't use special characters.
|
||||
network_ip_ends_in_one: |-
|
||||
|
@ -2053,6 +2077,9 @@ en:
|
|||
No pushed snapshot found!
|
||||
|
||||
Use `vagrant snapshot push` to push a snapshot to restore to.
|
||||
save:
|
||||
vm_not_created: |-
|
||||
Machine '%{name}' has not been created yet, and therefore cannot save snapshots. Skipping...
|
||||
status:
|
||||
aborted: |-
|
||||
The VM is in an aborted state. This means that it was abruptly
|
||||
|
@ -2135,6 +2162,9 @@ en:
|
|||
runner:
|
||||
waiting_cleanup: "Waiting for cleanup before exiting..."
|
||||
exit_immediately: "Exiting immediately, without cleanup!"
|
||||
disk:
|
||||
provider_unsupported: |-
|
||||
Guest provider '%{provider}' does not support the disk feature, and will not use the disk configuration defined.
|
||||
vm:
|
||||
boot:
|
||||
booting: Booting VM...
|
||||
|
|
|
@ -159,6 +159,8 @@ en:
|
|||
run exits and doesn't keep running.
|
||||
|
||||
errors:
|
||||
build_error: |-
|
||||
Vagrant received unknown output from `docker build` while building a container: %{result}
|
||||
compose_lock_timeout: |-
|
||||
Vagrant encountered a timeout waiting for the docker compose driver
|
||||
to become available. Please try to run your command again. If you
|
||||
|
|
|
@ -0,0 +1,7 @@
|
|||
# VAGRANT-BEGIN: <%= user %> <%= uuid %>
|
||||
<% folders.each do |dirs, opts| %>
|
||||
<% dirs.each do |d| %>
|
||||
<%= d %> <%=opts[:bsd__compiled_nfs_options] %> <%= ips.join(" ") %>
|
||||
<% end %>
|
||||
<% end %>
|
||||
# VAGRANT-END: <%= user %> <%= uuid %>
|
|
@ -0,0 +1,140 @@
|
|||
require File.expand_path("../../../../../base", __FILE__)
|
||||
|
||||
require Vagrant.source_root.join("plugins/commands/box/command/outdated")
|
||||
|
||||
describe VagrantPlugins::CommandBox::Command::Outdated do
|
||||
include_context "unit"
|
||||
|
||||
let(:argv) { [] }
|
||||
let(:iso_env) do
|
||||
env = isolated_environment
|
||||
env.vagrantfile("")
|
||||
env.create_vagrant_env
|
||||
end
|
||||
|
||||
subject { described_class.new(argv, iso_env) }
|
||||
|
||||
let(:action_runner) { double("action_runner") }
|
||||
|
||||
before do
|
||||
allow(iso_env).to receive(:action_runner).and_return(action_runner)
|
||||
end
|
||||
|
||||
context "with force argument" do
|
||||
let(:argv) { ["--force"] }
|
||||
|
||||
it "passes along the force update option" do
|
||||
expect(action_runner).to receive(:run).with(any_args) { |action, **opts|
|
||||
expect(opts[:box_outdated_force]).to be_truthy
|
||||
true
|
||||
}
|
||||
subject.execute
|
||||
end
|
||||
end
|
||||
|
||||
context "with global argument" do
|
||||
let(:argv) { ["--global"] }
|
||||
|
||||
it "calls outdated_global" do
|
||||
expect(subject).to receive(:outdated_global)
|
||||
|
||||
subject.execute
|
||||
end
|
||||
|
||||
describe ".outdated_global" do
|
||||
let(:test_iso_env) { isolated_environment }
|
||||
|
||||
let(:md) {
|
||||
md = Vagrant::BoxMetadata.new(StringIO.new(<<-RAW))
|
||||
{
|
||||
"name": "foo",
|
||||
"versions": [
|
||||
{
|
||||
"version": "1.0"
|
||||
},
|
||||
{
|
||||
"version": "1.1",
|
||||
"providers": [
|
||||
{
|
||||
"name": "virtualbox",
|
||||
"url": "bar"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"version": "1.2",
|
||||
"providers": [
|
||||
{
|
||||
"name": "vmware",
|
||||
"url": "baz"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
RAW
|
||||
}
|
||||
|
||||
let(:collection) do
|
||||
collection = double("collection")
|
||||
allow(collection).to receive(:all).and_return([box])
|
||||
allow(collection).to receive(:find).and_return(box)
|
||||
collection
|
||||
end
|
||||
|
||||
context "when latest version is available for provider" do
|
||||
let(:box) do
|
||||
box_dir = test_iso_env.box3("foo", "1.0", :vmware)
|
||||
box = Vagrant::Box.new(
|
||||
"foo", :vmware, "1.0", box_dir, metadata_url: "foo")
|
||||
allow(box).to receive(:load_metadata).and_return(md)
|
||||
box
|
||||
end
|
||||
|
||||
it "displays the latest version" do
|
||||
allow(iso_env).to receive(:boxes).and_return(collection)
|
||||
|
||||
expect(I18n).to receive(:t).with(/box_outdated$/, hash_including(latest: "1.2"))
|
||||
|
||||
subject.outdated_global({})
|
||||
end
|
||||
end
|
||||
|
||||
context "when latest version isn't available for provider" do
|
||||
let(:box) do
|
||||
box_dir = test_iso_env.box3("foo", "1.0", :virtualbox)
|
||||
box = Vagrant::Box.new(
|
||||
"foo", :virtualbox, "1.0", box_dir, metadata_url: "foo")
|
||||
allow(box).to receive(:load_metadata).and_return(md)
|
||||
box
|
||||
end
|
||||
|
||||
it "displays the latest version for that provider" do
|
||||
allow(iso_env).to receive(:boxes).and_return(collection)
|
||||
|
||||
expect(I18n).to receive(:t).with(/box_outdated$/, hash_including(latest: "1.1"))
|
||||
|
||||
subject.outdated_global({})
|
||||
end
|
||||
end
|
||||
|
||||
context "when no versions are available for provider" do
|
||||
let(:box) do
|
||||
box_dir = test_iso_env.box3("foo", "1.0", :libvirt)
|
||||
box = Vagrant::Box.new(
|
||||
"foo", :libvirt, "1.0", box_dir, metadata_url: "foo")
|
||||
allow(box).to receive(:load_metadata).and_return(md)
|
||||
box
|
||||
end
|
||||
|
||||
it "displays up to date message" do
|
||||
allow(iso_env).to receive(:boxes).and_return(collection)
|
||||
|
||||
expect(I18n).to receive(:t).with(/box_up_to_date$/, hash_including(version: "1.0"))
|
||||
|
||||
subject.outdated_global({})
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
|
@ -48,7 +48,7 @@ describe VagrantPlugins::CloudCommand::ProviderCommand::Command::Create do
|
|||
|
||||
it "creates a provider" do
|
||||
allow(VagrantCloud::Provider).to receive(:new).
|
||||
with(version, "virtualbox", nil, nil, "vagrant", "box-name", client.token).
|
||||
with(version, "virtualbox", nil, nil, "vagrant", "box-name", client.token, nil, nil, nil).
|
||||
and_return(provider)
|
||||
|
||||
expect(VagrantPlugins::CloudCommand::Util).to receive(:format_box_results)
|
||||
|
@ -59,7 +59,7 @@ describe VagrantPlugins::CloudCommand::ProviderCommand::Command::Create do
|
|||
|
||||
it "displays an error if encoutering a problem with the request" do
|
||||
allow(VagrantCloud::Provider).to receive(:new).
|
||||
with(version, "virtualbox", nil, nil, "vagrant", "box-name", client.token).
|
||||
with(version, "virtualbox", nil, nil, "vagrant", "box-name", client.token, nil, nil, nil).
|
||||
and_return(provider)
|
||||
|
||||
allow(provider).to receive(:create_provider).
|
||||
|
@ -73,7 +73,7 @@ describe VagrantPlugins::CloudCommand::ProviderCommand::Command::Create do
|
|||
|
||||
it "creates a provider" do
|
||||
allow(VagrantCloud::Provider).to receive(:new).
|
||||
with(version, "virtualbox", nil, "https://box.com/box", "vagrant", "box-name", client.token).
|
||||
with(version, "virtualbox", nil, "https://box.com/box", "vagrant", "box-name", client.token, nil, nil, nil).
|
||||
and_return(provider)
|
||||
|
||||
expect(VagrantPlugins::CloudCommand::Util).to receive(:format_box_results)
|
||||
|
|
|
@ -48,7 +48,7 @@ describe VagrantPlugins::CloudCommand::ProviderCommand::Command::Update do
|
|||
|
||||
it "updates a provider" do
|
||||
allow(VagrantCloud::Provider).to receive(:new).
|
||||
with(version, "virtualbox", nil, nil, "vagrant", "box-name", client.token).
|
||||
with(version, "virtualbox", nil, nil, "vagrant", "box-name", client.token, nil, nil, nil).
|
||||
and_return(provider)
|
||||
|
||||
expect(VagrantPlugins::CloudCommand::Util).to receive(:format_box_results)
|
||||
|
@ -59,7 +59,7 @@ describe VagrantPlugins::CloudCommand::ProviderCommand::Command::Update do
|
|||
|
||||
it "displays an error if encoutering a problem with the request" do
|
||||
allow(VagrantCloud::Provider).to receive(:new).
|
||||
with(version, "virtualbox", nil, nil, "vagrant", "box-name", client.token).
|
||||
with(version, "virtualbox", nil, nil, "vagrant", "box-name", client.token, nil, nil, nil).
|
||||
and_return(provider)
|
||||
|
||||
allow(provider).to receive(:update).
|
||||
|
@ -73,7 +73,7 @@ describe VagrantPlugins::CloudCommand::ProviderCommand::Command::Update do
|
|||
|
||||
it "creates a provider" do
|
||||
allow(VagrantCloud::Provider).to receive(:new).
|
||||
with(version, "virtualbox", nil, "https://box.com/box", "vagrant", "box-name", client.token).
|
||||
with(version, "virtualbox", nil, "https://box.com/box", "vagrant", "box-name", client.token, nil, nil, nil).
|
||||
and_return(provider)
|
||||
|
||||
expect(VagrantPlugins::CloudCommand::Util).to receive(:format_box_results)
|
||||
|
|
|
@ -52,6 +52,15 @@ describe VagrantPlugins::CloudCommand::Command::Publish do
|
|||
let(:argv) { ["vagrant/box", "1.0.0", "virtualbox"] }
|
||||
|
||||
it "shows help" do
|
||||
expect { subject.execute }.
|
||||
to raise_error(Vagrant::Errors::CLIInvalidUsage)
|
||||
end
|
||||
end
|
||||
|
||||
context "missing box file" do
|
||||
let(:argv) { ["vagrant/box", "1.0.0", "virtualbox", "/notreal/file.box"] }
|
||||
|
||||
it "raises an exception" do
|
||||
allow(File).to receive(:file?).and_return(false)
|
||||
expect { subject.execute }.
|
||||
to raise_error(Vagrant::Errors::BoxFileNotExist)
|
||||
|
|
|
@ -76,6 +76,27 @@ describe VagrantPlugins::CommandSnapshot::Command::Save do
|
|||
end
|
||||
end
|
||||
|
||||
context "with a snapshot guest and name given" do
|
||||
let(:argv) { ["foo", "backup"] }
|
||||
it "calls snapshot_save with a snapshot name" do
|
||||
machine.id = "foo"
|
||||
|
||||
expect(machine).to receive(:action) do |name, opts|
|
||||
expect(name).to eq(:snapshot_save)
|
||||
expect(opts[:snapshot_name]).to eq("backup")
|
||||
end
|
||||
|
||||
expect(subject.execute).to eq(0)
|
||||
end
|
||||
|
||||
it "doesn't snapshot a non-existent machine" do
|
||||
machine.id = nil
|
||||
|
||||
expect(machine).to_not receive(:action)
|
||||
expect(subject.execute).to eq(0)
|
||||
end
|
||||
end
|
||||
|
||||
context "with a duplicate snapshot name given and no force flag" do
|
||||
let(:argv) { ["test"] }
|
||||
|
||||
|
|
|
@ -16,9 +16,9 @@ describe 'VagrantPlugins::GuestAlpine::Cap::RSync' do
|
|||
VagrantPlugins::GuestAlpine::Plugin.components.guest_capabilities[:alpine].get(:rsync_install)
|
||||
end
|
||||
|
||||
it 'should install rsync' do
|
||||
it 'should install rsync with --update-cache flag' do
|
||||
# communicator.should_receive(:sudo).with('apk add rsync')
|
||||
expect(communicator).to receive(:sudo).with('apk add rsync')
|
||||
expect(communicator).to receive(:sudo).with('apk add --update-cache rsync')
|
||||
allow_message_expectations_on_nil
|
||||
described_class.rsync_install(machine)
|
||||
end
|
||||
|
|
|
@ -0,0 +1,32 @@
|
|||
require File.expand_path("../../../../base", __FILE__)
|
||||
|
||||
|
||||
describe VagrantPlugins::GuestAlpine::Plugin do
|
||||
let(:manager) { double("manager") }
|
||||
|
||||
before do
|
||||
allow(Vagrant::Plugin::Manager).to receive(:instance).and_return(manager)
|
||||
end
|
||||
|
||||
context "when vagrant-alpine plugin is not installed" do
|
||||
before do
|
||||
allow(manager).to receive(:installed_plugins).and_return({})
|
||||
end
|
||||
|
||||
it "should not display a warning" do
|
||||
expect($stderr).to_not receive(:puts)
|
||||
VagrantPlugins::GuestAlpine::Plugin.check_community_plugin
|
||||
end
|
||||
end
|
||||
|
||||
context "when vagrant-alpine plugin is installed" do
|
||||
before do
|
||||
allow(manager).to receive(:installed_plugins).and_return({ "vagrant-alpine" => {} })
|
||||
end
|
||||
|
||||
it "should display a warning" do
|
||||
expect($stderr).to receive(:puts).with(/vagrant plugin uninstall vagrant-alpine/)
|
||||
VagrantPlugins::GuestAlpine::Plugin.check_community_plugin
|
||||
end
|
||||
end
|
||||
end
|
|
@ -0,0 +1,167 @@
|
|||
require_relative "../../../../base"
|
||||
|
||||
describe "VagrantPlugins::GuestDarwin::Cap::MountVmwareSharedFolder" do
|
||||
let(:described_class) do
|
||||
VagrantPlugins::GuestDarwin::Plugin
|
||||
.components
|
||||
.guest_capabilities[:darwin]
|
||||
.get(:mount_vmware_shared_folder)
|
||||
end
|
||||
|
||||
let(:machine) { double("machine", communicate: communicator, id: "MACHINE_ID") }
|
||||
let(:communicator) { double("communicator") }
|
||||
|
||||
before do
|
||||
allow(communicator).to receive(:test)
|
||||
allow(communicator).to receive(:sudo)
|
||||
allow(VagrantPlugins::GuestDarwin::Plugin).to receive(:action_hook)
|
||||
end
|
||||
|
||||
describe ".mount_vmware_shared_folder" do
|
||||
let(:name) { "-vagrant" }
|
||||
let(:guestpath) { "/vagrant" }
|
||||
let(:options) { {} }
|
||||
|
||||
before do
|
||||
allow(described_class).to receive(:system_firmlink?)
|
||||
described_class.reset!
|
||||
end
|
||||
|
||||
after { described_class.
|
||||
mount_vmware_shared_folder(machine, name, guestpath, options) }
|
||||
|
||||
context "with APFS root container" do
|
||||
before do
|
||||
expect(communicator).to receive(:test).with("test -d /System/Volumes/Data").and_return(true)
|
||||
end
|
||||
|
||||
it "should check for existing entry" do
|
||||
expect(communicator).to receive(:test).with(/synthetic\.conf/)
|
||||
end
|
||||
|
||||
it "should register an action hook" do
|
||||
expect(VagrantPlugins::GuestDarwin::Plugin).to receive(:action_hook).with(:apfs_firmlinks, :after_synced_folders)
|
||||
end
|
||||
|
||||
context "with guest path within existing directory" do
|
||||
let(:guestpath) { "/Users/vagrant/workspace" }
|
||||
|
||||
it "should test if guest path is a symlink" do
|
||||
expect(communicator).to receive(:test).with(/test -L/)
|
||||
end
|
||||
|
||||
it "should remove guest path if it is a symlink" do
|
||||
expect(communicator).to receive(:test).with(/test -L/).and_return(true)
|
||||
expect(communicator).to receive(:sudo).with(/rm -f/)
|
||||
end
|
||||
|
||||
it "should not test if guest path is a directory if guest path is symlink" do
|
||||
expect(communicator).to receive(:test).with(/test -L/).and_return(true)
|
||||
expect(communicator).not_to receive(:test).with(/test -d/)
|
||||
end
|
||||
|
||||
it "should test if guest path is directory if not a symlink" do
|
||||
expect(communicator).to receive(:test).with(/test -d/)
|
||||
end
|
||||
|
||||
it "should remove guest path if it is a directory" do
|
||||
expect(communicator).to receive(:test).with(/test -d/).and_return(true)
|
||||
expect(communicator).to receive(:sudo).with(/rm -Rf/)
|
||||
end
|
||||
|
||||
it "should create the symlink to the vmware folder" do
|
||||
expect(communicator).to receive(:sudo).with(/ln -s/)
|
||||
end
|
||||
|
||||
it "should create the symlink within the writable APFS container" do
|
||||
expect(communicator).to receive(:sudo).with(%r{ln -s .+/System/Volumes/Data.+})
|
||||
end
|
||||
|
||||
it "should register an action hook" do
|
||||
expect(VagrantPlugins::GuestDarwin::Plugin).to receive(:action_hook).with(:apfs_firmlinks, :after_synced_folders)
|
||||
end
|
||||
|
||||
context "when firmlink is provided by the system" do
|
||||
before { expect(described_class).to receive(:system_firmlink?).and_return(true) }
|
||||
|
||||
it "should not register an action hook" do
|
||||
expect(VagrantPlugins::GuestDarwin::Plugin).not_to receive(:action_hook).with(:apfs_firmlinks, :after_synced_folders)
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
context "with non-APFS root container" do
|
||||
before do
|
||||
expect(communicator).to receive(:test).with("test -d /System/Volumes/Data").and_return(false)
|
||||
end
|
||||
|
||||
it "should test if guest path is a symlink" do
|
||||
expect(communicator).to receive(:test).with(/test -L/)
|
||||
end
|
||||
|
||||
it "should remove guest path if it is a symlink" do
|
||||
expect(communicator).to receive(:test).with(/test -L/).and_return(true)
|
||||
expect(communicator).to receive(:sudo).with(/rm -f/)
|
||||
end
|
||||
|
||||
it "should not test if guest path is a directory if guest path is symlink" do
|
||||
expect(communicator).to receive(:test).with(/test -L/).and_return(true)
|
||||
expect(communicator).not_to receive(:test).with(/test -d/)
|
||||
end
|
||||
|
||||
it "should test if guest path is directory if not a symlink" do
|
||||
expect(communicator).to receive(:test).with(/test -d/)
|
||||
end
|
||||
|
||||
it "should remove guest path if it is a directory" do
|
||||
expect(communicator).to receive(:test).with(/test -d/).and_return(true)
|
||||
expect(communicator).to receive(:sudo).with(/rm -Rf/)
|
||||
end
|
||||
|
||||
it "should create the symlink to the vmware folder" do
|
||||
expect(communicator).to receive(:sudo).with(/ln -s/)
|
||||
end
|
||||
|
||||
it "should not register an action hook" do
|
||||
expect(VagrantPlugins::GuestDarwin::Plugin).not_to receive(:action_hook).with(:apfs_firmlinks, :after_synced_folders)
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
describe ".system_firmlink?" do
|
||||
before { described_class.reset! }
|
||||
|
||||
context "when file does not exist" do
|
||||
before { allow(File).to receive(:exist?).with("/usr/share/firmlinks").and_return(false) }
|
||||
|
||||
it "should always return false" do
|
||||
expect(described_class.system_firmlink?("test")).to be_falsey
|
||||
end
|
||||
end
|
||||
|
||||
context "when file does exist" do
|
||||
let(:content) {
|
||||
["/Users\tUsers",
|
||||
"/usr/local\tusr/local"]
|
||||
}
|
||||
|
||||
before do
|
||||
expect(File).to receive(:exist?).with("/usr/share/firmlinks").and_return(true)
|
||||
expect(File).to receive(:readlines).with("/usr/share/firmlinks").and_return(content)
|
||||
end
|
||||
|
||||
it "should return true when firmlink exists" do
|
||||
expect(described_class.system_firmlink?("/Users")).to be_truthy
|
||||
end
|
||||
|
||||
it "should return true when firmlink is not prefixed with /" do
|
||||
expect(described_class.system_firmlink?("Users")).to be_truthy
|
||||
end
|
||||
|
||||
it "should return false when firmlink does not exist" do
|
||||
expect(described_class.system_firmlink?("/testing")).to be_falsey
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
|
@ -0,0 +1,50 @@
|
|||
require_relative "../../../../base"
|
||||
|
||||
require Vagrant.source_root.join("plugins/guests/linux/cap/reboot")
|
||||
|
||||
describe "VagrantPlugins::GuestLinux::Cap::Reboot" do
|
||||
let(:described_class) do
|
||||
VagrantPlugins::GuestLinux::Plugin.components.guest_capabilities[:linux].get(:wait_for_reboot)
|
||||
end
|
||||
|
||||
let(:machine) { double("machine") }
|
||||
let(:guest) { double("guest") }
|
||||
let(:communicator) { VagrantTests::DummyCommunicator::Communicator.new(machine) }
|
||||
let(:ui) { double("ui") }
|
||||
|
||||
before do
|
||||
allow(machine).to receive(:communicate).and_return(communicator)
|
||||
allow(machine).to receive(:guest).and_return(guest)
|
||||
allow(machine.guest).to receive(:ready?).and_return(true)
|
||||
allow(machine).to receive(:ui).and_return(ui)
|
||||
allow(ui).to receive(:info)
|
||||
end
|
||||
|
||||
after do
|
||||
communicator.verify_expectations!
|
||||
end
|
||||
|
||||
describe ".reboot" do
|
||||
it "reboots the vm" do
|
||||
allow(communicator).to receive(:execute)
|
||||
|
||||
expect(communicator).to receive(:execute).with(/reboot/, nil).and_return(0)
|
||||
expect(described_class).to receive(:wait_for_reboot)
|
||||
|
||||
described_class.reboot(machine)
|
||||
end
|
||||
|
||||
context "user output" do
|
||||
before do
|
||||
allow(communicator).to receive(:execute)
|
||||
allow(described_class).to receive(:wait_for_reboot)
|
||||
end
|
||||
|
||||
after { described_class.reboot(machine) }
|
||||
|
||||
it "sends message to user that guest is rebooting" do
|
||||
expect(ui).to receive(:info)
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
|
@ -24,7 +24,7 @@ describe "VagrantPlugins::GuestRedHat::Cap:NFSClient" do
|
|||
it "installs nfs client" do
|
||||
cap.nfs_client_install(machine)
|
||||
expect(comm.received_commands[0]).to match(/install nfs-utils/)
|
||||
expect(comm.received_commands[0]).to match(/\/bin\/systemctl restart rpcbind nfs/)
|
||||
expect(comm.received_commands[0]).to match(/\/bin\/systemctl restart rpcbind nfs-server/)
|
||||
end
|
||||
end
|
||||
end
|
||||
|
|
|
@ -25,15 +25,15 @@ describe "VagrantPlugins::GuestSUSE::Cap::ChangeHostName" do
|
|||
let(:basename) { "banana-rama" }
|
||||
|
||||
it "sets the hostname" do
|
||||
comm.stub_command("hostname -f | grep '^#{name}$'", exit_code: 1)
|
||||
comm.stub_command('test "$(hostnamectl --static status)" = "#{basename}"', exit_code: 1)
|
||||
|
||||
cap.change_host_name(machine, name)
|
||||
expect(comm.received_commands[1]).to match(/echo '#{basename}' > \/etc\/HOSTNAME/)
|
||||
expect(comm.received_commands[1]).to match(/hostname '#{basename}'/)
|
||||
expect(comm.received_commands[1]).to match(/hostnamectl set-hostname '#{basename}'/)
|
||||
end
|
||||
|
||||
it "does not change the hostname if already set" do
|
||||
comm.stub_command("hostname -f | grep '^#{name}$'", exit_code: 0)
|
||||
comm.stub_command('test "$(hostnamectl --static status)" = "#{basename}"', exit_code: 0)
|
||||
|
||||
cap.change_host_name(machine, name)
|
||||
expect(comm.received_commands.size).to eq(1)
|
||||
end
|
||||
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue