Freezing the time and date of an array of VMs - virtual-machine

Is there a way to freeze the time and date of a VM, so that it is not synchronized with the guest BIOS and/or the internet? I have seen some solutions which talk about killing services inside the VM, but I wish to avoid this as it changes the "clean state" of the VM (used for testing purposes).
To clarify: I don't want to set the time offset for a VM, I want to set the exact time that will be passed to the OS at boot time.
From there, is there a way to do this across a large number of VMs?

I ended up solving the issue with a Python script.
To use it, edit the VM_NAMES list to contain the names of the VMs as they appear in VirtualBox, then set RESET_TIME_VALUE to the date and time you wish to send to the VMs.
If you have installed VirtualBox in a non-default location, edit the VIRTUAL_BOX_MANAGE_PATH variable as well.
To run, call the main method.
import datetime
import subprocess

VIRTUAL_BOX_MANAGE_PATH = r"C:\Program Files\Oracle\VirtualBox\vboxmanage.exe"
SET_EXTRA_DATA_COMMAND = "setextradata"
GET_HOST_TIME_DISABLED_KEY = "VBoxInternal/Devices/VMMDev/0/Config/GetHostTimeDisabled"
MODIFY_VM_COMMAND = "modifyvm"
BIOS_SYSTEM_TIME_OFFSET = "--biossystemtimeoffset"

# Edit this list to add more Virtual Machines
VM_NAMES = ("xxx",
            "yyy",
            "zzz")

RESET_TIME_VALUE = datetime.datetime(2014, 6, 7, 13, 0, 0, 0)


def main():
    for vm in VM_NAMES:
        reset_time(vm)
        disable_time_sync(vm)


def reset_time(vm_name):
    """
    Resets the VM to the clean install time
    """
    args = get_subprocess_args_set_bios_time(vm_name, RESET_TIME_VALUE)
    print("Resetting time on VM [" + vm_name + "] to " + str(RESET_TIME_VALUE) + " ...")
    subprocess.call(args)
    print("\tDone.")


def disable_time_sync(vm_name):
    """
    Disables the time synchronization of a VM with the host
    """
    # The extradata key and its value are passed as separate list elements;
    # quoting them into a single string would make VBoxManage treat the
    # whole thing (quotes included) as one argument.
    args = [
        VIRTUAL_BOX_MANAGE_PATH,
        SET_EXTRA_DATA_COMMAND,
        vm_name,
        GET_HOST_TIME_DISABLED_KEY,
        "1",
    ]
    print("Disabling time synchronization on VM [" + vm_name + "] ...")
    subprocess.call(args)
    print("\tDone.")


def get_subprocess_args_set_bios_time(vm_name, datetime_to_set):
    """
    Returns a list containing the arguments to pass to the subprocess method
    to start VBoxManage and set the BIOS time to the supplied value
    """
    return [
        VIRTUAL_BOX_MANAGE_PATH,
        MODIFY_VM_COMMAND,
        vm_name,
        BIOS_SYSTEM_TIME_OFFSET,
        str(get_msec_time_difference(datetime.datetime.now(), datetime_to_set)),
    ]


def get_msec_time_difference(reference_point, check_point):
    """
    Computes the offset in msec from the reference point to the check point
    """
    return int(round((check_point - reference_point).total_seconds() * 1000))

" To run, call the main method."
..or just add at the end of the file
if __name__ == '__main__':
main()
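Not part of the original script, but if you later want to undo the change, a small helper along these lines (reusing the constants from the script above) should work: setting the GetHostTimeDisabled key back to 0 re-enables host time sync, and a zero offset clears the BIOS time shift.

# My addition (assumes the constants defined in the script above):
def restore_time_sync(vm_name):
    # Re-enable synchronization with the host clock.
    subprocess.call([
        VIRTUAL_BOX_MANAGE_PATH, SET_EXTRA_DATA_COMMAND, vm_name,
        GET_HOST_TIME_DISABLED_KEY, "0",
    ])
    # Clear the BIOS time offset.
    subprocess.call([
        VIRTUAL_BOX_MANAGE_PATH, MODIFY_VM_COMMAND, vm_name,
        BIOS_SYSTEM_TIME_OFFSET, "0",
    ])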

Related

Gem5: No workload specified

I am having some trouble running a simulation in gem5. I keep getting the error:
no workload specified
# Set the workload and benchmark
process = Process()
process.cmd = ['gzip', '-k', 'test.txt']
system.cpu.workload = process
system.cpu.createThreads()
# Set the workload
print("set workload")
m5.command_line.set_workload('mcf')
print("workload Set")
I tried to add prints around the workload-setting portion but received the same error:
# Set the workload
print("set workload")
m5.command_line.set_workload('mcf')
print("workload Set")
If none of the code below works for you, leave a comment and I will share a more complete gem5 project that runs SPEC2017 and SPEC2006 as well as plain executable binaries.
Here is an example of running one of the SPEC2017 benchmarks:
# bench_dir, refspeed_run_dir and exe_suffix are defined elsewhere in the full config.
bwaves_s = Process()
bwaves_s_dir = '603.bwaves_s/'
bwaves_s_run_dir = bwaves_s_dir + refspeed_run_dir
bwaves_s.executable = bench_dir + bwaves_s_run_dir + 'speed_bwaves' + exe_suffix
bwaves_s_data = 'bwaves_1.in'
bwaves_s.cmd = [bwaves_s.executable]
bwaves_s.output = 'bwaves_s.out'
bwaves_s.input = bench_dir + bwaves_s_run_dir + bwaves_s_data

# Assign the process to the CPU; use whichever form matches your CPU definition:
system.cpu[0].workload = bwaves_s
# or: system.cpu.workload = bwaves_s
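For context (my addition, not part of the answer): in this classic, non-stdlib configuration style, the workload assignment is normally followed by creating a Root object and starting the simulation, roughly as below (this assumes the usual "from m5.objects import *" and "import m5"). On recent gem5 releases, SE mode additionally expects a system-level workload object, which can also be the source of a "no workload specified" error.

# Sketch (my addition): typical classic SE-mode flow after the workload is assigned.
system.cpu[0].workload = bwaves_s
system.cpu[0].createThreads()

# On recent gem5 versions, SE mode also expects a system-level workload object,
# e.g. (assuming SEWorkload is imported from m5.objects):
# system.workload = SEWorkload.init_compatible(bwaves_s.executable)

root = Root(full_system=False, system=system)
m5.instantiate()
exit_event = m5.simulate()
print("Exiting @ tick %i because %s" % (m5.curTick(), exit_event.getCause()))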
Another example, running a binary with the gem5 standard library:
from gem5.components.boards.simple_board import SimpleBoard
from gem5.components.memory.single_channel import SingleChannelDDR3_1600
from gem5.components.processors.simple_processor import SimpleProcessor
from gem5.components.processors.cpu_types import CPUTypes
from gem5.resources.resource import CustomResource
from gem5.simulate.simulator import Simulator
from unique_cache_hierarchy.unique_cache_hierarchy_complete import UniqueCacheHierarchy

# Obtain the components.
cache_hierarchy = UniqueCacheHierarchy()
memory = SingleChannelDDR3_1600("1GiB")
processor = SimpleProcessor(cpu_type=CPUTypes.ATOMIC, num_cores=1)

# Add them to the board.
board = SimpleBoard(
    clk_freq="3GHz", processor=processor, memory=memory, cache_hierarchy=cache_hierarchy
)

# Set the workload.
binary = CustomResource(
    "materials/using-gem5/02-stdlib/m5-exit-example/m5-exit-example"
)
board.set_se_binary_workload(binary)

# Set up the Simulator and run the simulation.
simulator = Simulator(board=board)
simulator.run()

SSH and Ping to hosts concurrently with Python asyncio?

I'm trying to SSH/ping to hosts concurrently, but I don't see any result so far, so my implementation probably isn't correct. This is what I have; any ideas appreciated.
import paramiko
import time
import asyncio
import subprocess
async def sshTest(ipaddress, deviceUsername, devicePassword, sshPort):  # finalDict
    try:
        print("Performing SSH Connection to the device")
        client = paramiko.SSHClient()
        client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        client.connect(ipaddress, username=deviceUsername, password=devicePassword,
                       port=sshPort, look_for_keys=False, allow_agent=False)
        print("Channel established")
    except Exception as e:
        print(e)

async def pingf(ip):
    p1 = subprocess.Popen(['ping', '-c', '5', ip], stdout=subprocess.PIPE)
    output = p1.communicate()[0]
    print(output)

async def main():
    taskA = loop.create_task(sshTest('192.168.255.68', 'admin', 'admin', '22'))
    taskB = loop.create_task(sshTest('192.168.254.108', 'admin', 'admin', '22'))
    taskC = loop.create_task(sshTest('192.168.249.134', 'admin', 'admin', '22'))
    taskD = loop.create_task(sshTest('192.168.254.108', 'admin', 'admin', '22'))
    task1 = loop.create_task(pingf('192.168.255.68'))
    task2 = loop.create_task(pingf('192.168.254.108'))
    task3 = loop.create_task(pingf('192.168.249.134'))
    task4 = loop.create_task(pingf('192.168.254.108'))
    await asyncio.wait([taskA, taskB, taskC, taskD, task1, task2, task3, task4])

if __name__ == "__main__":
    start = time.time()
    loop = asyncio.get_event_loop()
    loop.run_until_complete(main())
    end = time.time()
    print("The time of execution of above program is :", end - start)
Asyncio is a form of cooperative multitasking. This means that in order for tasks to run concurrently, a task must explicitly yield control back to the scheduler, which in Python means "your tasks need to await on something".
Neither of your task functions ever awaits anything, so they're not going to run concurrently. What you have right now will run serially.
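As a tiny illustration (my own code, not from the question): two coroutines only interleave when each one awaits something that yields control back to the event loop.

# Illustration (mine): the workers interleave only because each await yields control.
import asyncio

async def worker(name):
    for i in range(3):
        print(name, i)
        await asyncio.sleep(0.1)   # without this await, each worker would run to completion first

async def demo():
    await asyncio.gather(worker("A"), worker("B"))

asyncio.run(demo())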
If you want to run ssh connections concurrently, you're going to have to either:
Replace paramiko with something like AsyncSSH, which is written to work with asyncio (a sketch follows this list), or
Use threading or multiprocessing to parallelize your tasks, rather than using asyncio.
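If you go with the first option, a minimal sketch with AsyncSSH might look like this (my code, not the answer author's; the hosts and credentials are the ones from the question, and known_hosts=None mirrors the AutoAddPolicy behaviour).

# Sketch (mine): concurrent SSH with AsyncSSH instead of paramiko.
import asyncio
import asyncssh

async def ssh_test(host, username, password, port=22):
    try:
        async with asyncssh.connect(host, username=username, password=password,
                                    port=port, known_hosts=None) as conn:
            result = await conn.run("uptime")
            return result.stdout
    except (OSError, asyncssh.Error) as exc:
        return "failed to connect to %s: %s" % (host, exc)

async def run_all():
    results = await asyncio.gather(
        ssh_test("192.168.255.68", "admin", "admin"),
        ssh_test("192.168.254.108", "admin", "admin"),
        ssh_test("192.168.249.134", "admin", "admin"),
    )
    for r in results:
        print(r)

asyncio.run(run_all())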
Additionally, if you're working with asyncio, anything that involves running an external command (such as your pingf task) is going to need to use asyncio's run_in_executor method.
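A minimal sketch of that (again my own code, not part of the original answer), wrapping a blocking ping call with loop.run_in_executor so the event loop is not blocked; the IP address is a placeholder.

# Sketch (mine): run a blocking ping in the default thread pool via run_in_executor.
import asyncio
import subprocess

def ping_blocking(ip):
    return subprocess.check_output(["ping", "-c", "5", ip])

async def pingf(ip):
    loop = asyncio.get_running_loop()
    output = await loop.run_in_executor(None, ping_blocking, ip)
    print(output.decode())

asyncio.run(pingf("127.0.0.1"))

(asyncio.create_subprocess_exec is another option specifically for subprocesses, but run_in_executor is the general-purpose tool for any blocking call.)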
For the example you've shown here, I would suggest instead using the concurrent.futures module. Your code might end up looking something like this (I've modified the code to run in my test environment and given the sshTest task something to do beyond simply connecting):
import concurrent.futures
import paramiko
import asyncio
import subprocess
def sshTest(ipaddress, deviceUsername, devicePassword, sshPort):  # finalDict
    try:
        print("Performing SSH Connection to the device")
        client = paramiko.SSHClient()
        client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        client.connect(
            ipaddress,
            username=deviceUsername,
            password=devicePassword,
            port=sshPort,
            look_for_keys=True,
            allow_agent=True,
        )
        stdin, stdout, stderr = client.exec_command("sh -c 'sleep 2; uptime'")
        output = stdout.read()
        return output
    except Exception:
        return "failed to connect"

def pingf(ip):
    output = subprocess.check_output(["ping", "-c", "5", ip])
    return output

def main():
    futures = []
    with concurrent.futures.ThreadPoolExecutor() as pool:
        futures.append(pool.submit(sshTest, "localhost", "root", "admin", "2200"))
        futures.append(pool.submit(sshTest, "localhost", "root", "admin", "2201"))
        futures.append(pool.submit(sshTest, "localhost", "root", "admin", "2202"))
        futures.append(pool.submit(pingf, "192.168.1.1"))
        futures.append(pool.submit(pingf, "192.168.1.5"))
        futures.append(pool.submit(pingf, "192.168.1.254"))
        for future in concurrent.futures.as_completed(futures):
            print("return value from task:", future.result())

if __name__ == "__main__":
    main()

How to open online reference from IPython?

Is there a way to have IPython open a browser pointed at the appropriate online reference?
Especially for numpy, scipy, and matplotlib?
For example, the doc for numpy.linalg.cholesky is pretty hard to read in a terminal.
I don't think there is a direct way to make IPython, or any shell, open documentation online, because the primary job of a shell is to let you interact with the thing it is a shell to.
We could however write a script to open a new tab on a browser with the documentation. Like so:
import webbrowser

docsList = {
    "numpy": lambda x: "https://docs.scipy.org/doc/numpy/reference/generated/" + x + ".html",
    "scipy": lambda x: "https://docs.scipy.org/doc/scipy/reference/generated/" + x + ".html",
    "matplotlib": lambda x: "https://matplotlib.org/api/" + x.split('.')[1] + "_api.html",
    "default": lambda x: "https://www.google.com/search?q=documentation+" + x
}

def online(method_name):
    """
    Opens up the documentation for method_name on the default browser.
    If the package doesn't match any entry in the dictionary, falls back to
    Google.

    Usage
    -----
    >>> lookUp.online("numpy.linalg.cholesky")
    >>> lookUp.online("matplotlib.contour")
    """
    try:
        url = make_url(method_name)
    except AttributeError:
        print("Enter the method name as a string and try again")
        return
    webbrowser.open(url, new=2)

def make_url(method_name):
    package_name = method_name.split('.')[0]
    try:
        return docsList[package_name](method_name)
    except KeyError:
        return docsList["default"](method_name)
You could save the above as "lookUp.py" at a location that Python can find it in, and then import it whenever you need to use it.
Caveats:
This method takes strings as input, so if you call it on a function it'll throw an error.
>>> lookUp.online("numpy.linalg.cholesky")
Will work.
>>> lookUp.online(numpy.linalg.cholesky)
Will ask you to give it as a string.
So use autocomplete to get to the function and then wrap it in quotes to get it to work.
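If typing the quotes gets tedious, one option (my addition, not part of the original answer) is to register the helper as an IPython line magic, since a line magic receives the rest of the line as a raw string.

# Optional IPython integration (my addition): run this inside an IPython session
# once lookUp.py is importable, then type
#   %online numpy.linalg.cholesky
# with no quotes needed.
from IPython.core.magic import register_line_magic
import lookUp

@register_line_magic
def online(line):
    lookUp.online(line.strip())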

See all of the unique IDs related to a vmware virtual machine

I want to see all of the unique IDs that are specific to a virtual machine, such as:
hardware ID, CPU ID, UUID, MAC address, etc.
Could anybody please help me find these IDs?
I can help you find some of these. For the rest you will have to search the docs.
Install pyVmomi and run the following code.
EDIT: Changed the code to run against an ESX host directly. Simply run it with python <script_name>.py.
Now, to understand how this code works, you have to learn about Managed Objects. For example, here we are working with the Managed Object vm, and this object has many properties listed in the doc. So to retrieve the UUID of a VM we read vm.config.uuid. For the other details you have to go through the VirtualMachine object and see which properties you need.
import sys
import atexit
import time

from pyVmomi import vim, vmodl
from pyVim.connect import Disconnect
from pyVim import connect

inputs = {'esx_ip': '15.22.10.10',
          'esx_password': 'Password123',
          'esx_user': 'root',
          'vm_name': 'ubuntu',
          }


def wait_for_task(task, actionName='job', hideResult=False):
    """
    Waits and provides updates on a vSphere task
    """
    while task.info.state == vim.TaskInfo.State.running:
        time.sleep(2)

    if task.info.state == vim.TaskInfo.State.success:
        if task.info.result is not None and not hideResult:
            out = '%s completed successfully, result: %s' % (actionName, task.info.result)
            print out
        else:
            out = '%s completed successfully.' % actionName
            print out
    else:
        out = '%s did not complete successfully: %s' % (actionName, task.info.error)
        print out
        raise task.info.error

    return task.info.result


def get_obj(content, vimtype, name):
    """
    Get the vSphere object associated with a given text name
    """
    obj = None
    container = content.viewManager.CreateContainerView(content.rootFolder, vimtype, True)
    for c in container.view:
        if c.name == name:
            obj = c
            break
    return obj


def main():
    si = None
    try:
        print "Trying to connect ..."
        si = connect.Connect(inputs['esx_ip'], 443, inputs['esx_user'], inputs['esx_password'])
    except IOError, e:
        pass
    if not si:
        print "Cannot connect to specified host using specified username and password"
        sys.exit()

    print "Connected to the host!"
    atexit.register(Disconnect, si)

    content = si.RetrieveContent()

    # Get the VirtualMachine object
    vm = get_obj(content, [vim.VirtualMachine], inputs['vm_name'])

    print "GuestID: ", vm.config.guestId
    print "UUID: ", vm.config.uuid
    print "Version: ", vm.config.version
    for device in vm.config.hardware.device:
        if isinstance(device, vim.vm.device.VirtualEthernetCard):
            print "MAC Address: ", device.macAddress

    # Example of changing UUID:
    new_uuid = '423ffff0-5d62-d040-248c-4538ae2c734f'
    vmconf = vim.vm.ConfigSpec()
    vmconf.uuid = new_uuid
    task = vm.ReconfigVM_Task(vmconf)
    wait_for_task(task, 'Reconfigure VM')
    print "Successfully changed UUID"
    print "New UUID: ", vm.config.uuid


if __name__ == "__main__":
    main()
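Not part of the original answer, but if you need more identifiers, a few other properties of the same VirtualMachine object can be printed from inside main(), right after the existing print statements; instanceUuid, _moId and hardware.numCPU are standard pyVmomi attributes.

# My addition (place inside main(), after the existing prints; 'vm' is the
# VirtualMachine object fetched above):
print "Instance UUID: ", vm.config.instanceUuid    # instance UUID assigned by vCenter/ESX
print "MoRef ID:      ", vm._moId                  # managed object reference, e.g. 'vm-123'
print "Num CPUs:      ", vm.config.hardware.numCPU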

redis move all keys

Is it possible to use Redis's MOVE command to move all keys from one database to another? The MOVE command only moves one key, but I need to move all the keys in the database.
I would recommend taking a look at the following alpha-version app to backup and restore Redis databases (you can install it via gem install redis-dump). You could redis-dump your database and then redis-load it into another database via the --database argument.
redis-dump project
If this doesn't fit your purposes, you may need to make use of a scripting language's Redis bindings (or alternatively throw something together using bash / redis-cli / xargs, etc.). If you need assistance along these lines then we probably need more details first.
I've written a small Python script to move data between two Redis servers (it only supports list and string types, and you must install the Python redis client):
'''
Created on 2011-11-9
#author: wuyi
'''
import redis
from optparse import OptionParser
import time


def mv_str(r_source, r_dest, quiet):
    keys = r_source.keys("*")
    for k in keys:
        if r_dest.keys(k):
            print "skipping %s" % k
            continue
        else:
            print "copying %s" % k
            r_dest.set(k, r_source.get(k))


def mv_list(r_source, r_dest, quiet):
    keys = r_source.keys("*")
    for k in keys:
        length = r_source.llen(k)
        i = 0
        while (i < length):
            print "add queue no.:%d" % i
            v = r_source.lindex(k, i)
            r_dest.rpush(k, v)
            i += 1


if __name__ == "__main__":
    usage = """usage: %prog [options] source dest"""
    parser = OptionParser(usage=usage)
    parser.add_option("-q", "--quiet", dest="quiet",
                      default=False, action="store_true",
                      help="quiet mode")
    parser.add_option("-p", "--port", dest="port",
                      default=6380,
                      help="port for both source and dest")
    parser.add_option("", "--dbs", dest="dbs",
                      default="0",
                      help="db list: 0 1 120 220...")
    parser.add_option("-t", "--type", dest="type",
                      default="normal",
                      help="available types: normal, lpoplist")
    parser.add_option("", "--tmpdb", dest="tmpdb",
                      default=0,
                      help="tmp db number to store tmp data")

    (options, args) = parser.parse_args()
    if not len(args) == 2:
        print usage
        exit(1)
    source = args[0]
    dest = args[1]
    if source == dest:
        print "dest must not be the same as source!"
        exit(2)

    dbs = options.dbs.split(' ')
    for db in dbs:
        r_source = redis.Redis(host=source, db=db, password="", port=int(options.port))
        r_dest = redis.Redis(host=dest, db=db, password="", port=int(options.port))
        print "______________db____________:%s" % db
        time.sleep(2)
        if options.type == "normal":
            mv_str(r_source, r_dest, options.quiet)
        elif options.type == "lpoplist":
            mv_list(r_source, r_dest, options.quiet)
        del r_source
        del r_dest
You can try my own tool, rdd.
It's a command-line utility that can dump a database to a file, work on it (filter, match, merge, ...), and load it back into a Redis instance.
Take care, it is at an alpha stage: https://github.com/r043v/rdd/
Now that Redis has Lua scripting, you can easily write a command that loops through all the keys, checks their type, and moves them accordingly to a new database. A sketch of the same idea in plain Python follows.
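This is my addition, not part of the answers above: a minimal sketch of the loop-over-all-keys idea written with redis-py rather than Lua. Since MOVE itself is type-agnostic, the sketch skips the per-type handling; the host, port, and database numbers are placeholders.

# Minimal sketch (my addition): move every key from one logical database to
# another on the same Redis server using MOVE.
import redis

SOURCE_DB = 0   # placeholder database numbers
TARGET_DB = 1

r = redis.Redis(host="localhost", port=6379, db=SOURCE_DB)
for key in r.scan_iter("*"):
    # MOVE returns 0 (and does nothing) if the key already exists in the target db.
    if not r.move(key, TARGET_DB):
        print("skipped, already present in target db:", key)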
I suggest you try the following:
1. Copy the RDB file to another directory.
2. Rename the RDB file.
3. Modify the Redis configuration file of the new instance so it points at the copied RDB file.