Retrieving logs from Mercurial using the API

I would like to retrieve logs from a Mercurial repository using the Mercurial commands API. Unfortunately, mercurial.commands.log prints its messages to stdout instead of returning a nice list of revisions, as e.g. pysvn does. Can this be achieved easily? I would like to add Mercurial support to my program and would like to do it as easily as possible.

You should do something along the lines of this:
from mercurial import ui, hg
u = ui.ui()
repo = hg.repository(u, '.')
for rev in repo:
    print repo[rev]
The subscripted object is a change context object. It has useful methods like description(), branch(), and user(). For a complete list of what it can do, see the source (or do a dir() on it).
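To spell out those methods, here is a minimal sketch, assuming a local repository in the current directory (the fields printed are just an example):
from mercurial import ui, hg

u = ui.ui()
repo = hg.repository(u, '.')       # assumes the repository is the current directory
for rev in repo:
    ctx = repo[rev]                # change context for this revision
    print ctx.rev(), ctx.user(), ctx.branch()
    print ctx.description()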

The simple answer is to use ui.pushbuffer() right before you call the log command and log_output = ui.popbuffer() right after you call it. By doing that, log_output will contain the output of the log command.
Are you actually looking for the straight log output, though, or do you really want the diff or some other kind of data? If we know exactly what you're trying to get (for example: "the commit messages of every changeset between X and Y"), we might be able to show you a better way.
EDIT: Take a look at the Mercurial API wiki page to see how to get most of the common information from repo and ctx objects.
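For illustration, here is a minimal sketch of the pushbuffer/popbuffer approach, assuming a local repository in the current directory (depending on your Mercurial version, commands.log may expect additional keyword options):
from mercurial import ui, hg, commands

u = ui.ui()
repo = hg.repository(u, '.')

u.pushbuffer()                 # start capturing everything written to the ui
commands.log(u, repo)          # would normally print straight to stdout
log_output = u.popbuffer()     # the captured text of the log command
print log_output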

Yeah, I had the same problem. It seems it's designed to disallow retrieving logs remotely. The web interface gives a little RSS feed, but it wasn't enough of a history for me, so we created our own customised RSS feed.
It's not the most elaborate of things and it is customised to our liking; you can mix the fields around in print_item() to change the look of the feed. You could also modify it to return log info on specific changesets if needed.
You will have to add a script alias to Apache, something like (see http://httpd.apache.org/docs/2.0/howto/cgi.html for more info):
ScriptAlias /feed.cgi /usr/local/systems/hg/script/feed.cgi
feed.cgi file contents:
#!/usr/bin/env python2.5
# -*- python -*-
"""
Creates a rss feed from commit log messages in a repository/branch.
Can be filtered on commit logs from a set date eg date=2009-12-12
or by a number of days previous eg. days=7
Usage:
* retrieve all logs: http://hg.server/feed.cgi?repository=MyRepo
* retrieve logs from set date: http://hg.server/feed.cgi?repository=MyRepo&date=2009-11-11
* retrieve logs from last 77 days: http://hg.server/feed.cgi?repository=MyRepo&days=77
* retrieve all logs from a branch: http://hg.server/feed.cgi?repository=MyRepo&branch=myBranch
Script Location on server: /usr/local/systems/hg/script/feed.cgi
"""
defaultdateformats = (
    '%Y-%m-%d %H:%M:%S',
    '%Y-%m-%d %I:%M:%S%p',
    '%Y-%m-%d %H:%M',
    '%Y-%m-%d %I:%M%p',
    '%Y-%m-%d',
    '%m-%d',
    '%m/%d',
    '%m/%d/%y',
    '%m/%d/%Y',
    '%a %b %d %H:%M:%S %Y',
    '%a %b %d %I:%M:%S%p %Y',
    '%a, %d %b %Y %H:%M:%S',  # GNU coreutils "/bin/date --rfc-2822"
    '%b %d %H:%M:%S %Y',
    '%b %d %I:%M:%S%p %Y',
    '%b %d %H:%M:%S',
    '%b %d %I:%M:%S%p',
    '%b %d %H:%M',
    '%b %d %I:%M%p',
    '%b %d %Y',
    '%b %d',
    '%H:%M:%S',
    '%I:%M:%S%p',
    '%H:%M',
    '%I:%M%p',
)
import os, sys, cgi, cgitb, datetime, time
cgitb.enable()
from mercurial import ui, hg, util
from mercurial.node import short
def find_repository(name):
    base = '/usr/local/systems/hg/repos/'
    path = os.path.join(base, name)
    repos = hg.repository(None, path)
    return repos

def find_changes(repos, branch, date):
    # returns true if d2 is newer than d1
    def newerDate(d1, d2):
        d1 = datetime.datetime.fromtimestamp(d1)
        d2 = datetime.datetime.fromtimestamp(d2)
        return d1 < d2

    #for ctx in repos.changelog:
    #    print ctx
    changes = repos.changelog
    out = []

    # filter on branch
    if branch != '':
        changes = [change for change in changes if repos.changectx(change).branch() == branch]

    # filter on date
    if date != '':
        changes = [change for change in changes if newerDate(date, repos.changectx(change).date()[0])]

    return changes
def print_item(change, link_template):
    def _element(name, content):
        content = cgi.escape(content)
        print "    <%(name)s>%(content)s</%(name)s>" % {
            'name': name,
            'content': content
        }

    link = link_template % {'node': short(change.node())}
    print "  <item>"
    _element('title', str(change.rev()))
    _element('description', change.description())
    _element('guid', str(change.rev()))
    _element('author', change.user())
    _element('link', link)
    _element('pubdate', str(datetime.datetime.fromtimestamp(change.date()[0])))
    print "  </item>"
def print_rss(changes, repos, template):
    print """<?xml version="1.0" encoding="UTF-8"?>
<rss version="2.0">
<channel>
<link>N/A</link>
<language>en-us</language>
<title>Changelog</title>
<description>Changelog</description>
"""
    for change in changes:
        ctx = repos.changectx(change)
        print_item(ctx, template)

    print """
</channel>
</rss>
"""
if __name__ == "__main__":
    # -*- python -*-
    print "Content-Type: application/rss+xml; charset=UTF-8"
    print

    f = cgi.FieldStorage()
    if not f.has_key("repository"):
        print "Need to specify repository."
        sys.exit()

    repository = f['repository'].value

    branch = ''
    if f.has_key('branch'):
        branch = f['branch'].value

    date = ''
    if f.has_key('date') and not f.has_key('days'):
        try:
            #date = datetime.datetime.strptime(f['date'].value, '%Y-%m-%d')
            date = util.parsedate(f['date'].value)[0]
        except:
            print 'Error in date format, use one of the following formats:', defaultdateformats
            sys.exit()
    elif f.has_key('days') and not f.has_key('date'):
        days = int(f['days'].value)
        try:
            date = datetime.datetime.now() - datetime.timedelta(days=days)
            date = time.mktime(date.timetuple())
        except:
            print 'Error in days, please use a standard number eg. days=7'
            sys.exit()
    elif f.has_key('days') and f.has_key('date'):
        print 'Error, please only supply a dayrange OR a date, not both'
        sys.exit()

    repos = find_repository(repository)
    changes = find_changes(repos, branch, date)
    rev_link_template = 'http://hg.server/hg/%(repos)s/rev/%%(node)s' % {
        'repos': repository
    }
    print_rss(changes, repos, rev_link_template)

Related

How to extract the [Documentation] text from Robot framework test case

I am trying to extract the content of the [Documentation] section as a string for comparison with another part in a Python script.
I was told to use the Robot Framework API (https://robot-framework.readthedocs.io/en/stable/) to extract it, but I have no idea how.
However, I am required to work with version 3.1.2.
Example:
*** Test Cases ***
ATC Verify that Sensor Battery can enable and disable manufacturing mode
[Documentation] E1: This is the description of the test 1
... E2: This is the description of the test 2
[Tags] E1 TRACE{Trace_of_E1}
... E2 TRACE{Trace_of_E2}
I want to extract the string as:
E1: This is the description of the test 1
E2: This is the description of the test 2
Have a look at these examples. I did something similar to generate test plan descriptions. I tried to adapt my code to your requirements, and this could maybe work for you.
import os
import re
from robot.api.parsing import (
    get_model, get_tokens, Documentation, EmptyLine, KeywordCall,
    ModelVisitor, Token
)


class RobotParser(ModelVisitor):
    def __init__(self):
        # Collected documentation text
        self.text = ''

    def get_text(self):
        return self.text

    def visit_TestCase(self, node):
        # The matched `TestCase` node is a block with `header` and
        # `body` attributes. `header` is a statement with familiar
        # `get_token` and `get_value` methods for getting certain
        # tokens or their value.
        for keyword in node.body:
            # skip statements that are not [Documentation]
            if keyword.get_value(Token.DOCUMENTATION) is None:
                continue
            self.text += keyword.get_value(Token.ARGUMENT)

    def visit_Documentation(self, node):
        # The matched "Documentation" node with its value
        self.text += node.value + '\n'

    def visit_File(self, node):
        # Call `generic_visit` to visit also child nodes.
        return self.generic_visit(node)


if __name__ == "__main__":
    path = "../tests"
    for filename in os.listdir(path):
        if re.match(r".*\.robot", filename):
            model = get_model(os.path.join(path, filename))
            robot_parser = RobotParser()
            robot_parser.visit(model)
            text = robot_parser.get_text()
The code marked as the best answer didn't quite work for me and has a lot of redundancy, but it inspired me enough to get into the parsing and write it in a much more readable and efficient way that actually works as is. You just have to have your own way of iterating through the filesystem and calling the get_robot_metadata(filepath) function (a short usage sketch follows after the code).
from robot.api.parsing import (get_model, ModelVisitor, Token)


class RobotParser(ModelVisitor):
    def __init__(self):
        self.testcases = {}

    def visit_TestCase(self, node):
        testcasename = node.header.name
        self.testcases[testcasename] = {}
        for section in node.body:
            if section.get_value(Token.DOCUMENTATION) is not None:
                documentation = section.value
                self.testcases[testcasename]['Documentation'] = documentation
            elif section.get_value(Token.TAGS) is not None:
                tags = section.values
                self.testcases[testcasename]['Tags'] = tags

    def get_testcases(self):
        return self.testcases


def get_robot_metadata(filepath):
    if filepath.endswith('.robot'):
        robot_parser = RobotParser()
        model = get_model(filepath)
        robot_parser.visit(model)
        metadata = robot_parser.get_testcases()
        return metadata
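As a usage sketch, assuming a hypothetical ../tests folder of .robot files (the folder name and the printed fields are just examples), iterating a directory and calling get_robot_metadata() could look like this:
import os

path = "../tests"   # hypothetical folder containing .robot files
for filename in os.listdir(path):
    metadata = get_robot_metadata(os.path.join(path, filename))
    if metadata:    # None for non-.robot files, empty for files without test cases
        for testcase, sections in metadata.items():
            print(testcase, "->", sections.get('Documentation'))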
This function will be able to extract the [Documentation] section from the testcase:
def documentation_extractor(testcase):
    documentation = []
    for setting in testcase.settings:
        if len(setting) > 2 and setting[1].lower() == "[documentation]":
            for doc in setting[2:]:
                if doc.startswith("#"):
                    # the start of a comment, so skip rest of the line
                    break
                documentation.append(doc)
            break
    return "\n".join(documentation)

How to increase efficiency of perl script which uses sqlplus

I have this Perl script which pulls data from an Oracle database via sqlplus. The database adds a new entry every time the state changes for a particular serial number. We need to pick the entries at every state change and prepare a CSV file with the old state, the new state and other fields. A sample of the db table:
SERIALNUMBER STATE AT OPERATORID SUBSCRIBERID TRANSACTIONID
51223344558899 Available 20081008T10:15:47 vsuser
51223344558857 Available 20081008T10:15:49 vsowner
51223344558899 Used 20081008T10:20:25 vsuser
51223344558860 Stolen 20081008T10:15:49 vsanyone
51223344558857 Damaged 20081008T10:50:49 vsowner
51223344558899 Damaged 20081008T10:50:25 vsuser
51343253335355 Available 20081008T11:15:47 vsindian
My script:
#! /usr/bin/perl
#use warnings;
use strict;

#my $circle =
#my $schema =
my $basePath = "/scripts/Voucher-State-Change";

#my ($sec, $min, $hr, $day, $month, $years) = localtime(time);
#$years_+=1900;$mont_+=1;
#my $timestamp=sprintf("%d%02d%02d",$years,$mont,$moday);

sub getDate {
    my $daysago = shift;
    $daysago = 0 unless ($daysago);
    #my @months=qw(Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec);
    my ($sec,$min,$hour,$mday,$mon,$year,$wday,$yday,$isdst) = localtime(time - (86400 * $daysago));
    # YYYYMMDD, e.g. 20060126
    return sprintf("%d%02d%02d", $year + 1900, $mon + 1, $mday);
}

my $filedate = getDate(1);
#my $startdate="${filedate}T__:__:__";
my $startdate = "20081008T__:__:__";
print "$startdate\n";

##### Generating output file---
my $outputFile = "${basePath}/VoucherStateChangeReport.$filedate.csv";
open (WFH, ">", "$outputFile") or die "Can't open output file $outputFile for writing: $!\n";
print WFH "VoucherSerialNumber,Date,Time,OldState,NewState,UserId\n";

##### Generating log file---
my $logfile = "${basePath}/VoucherStateChange.$filedate.log";
open (STDOUT, ">>", "$logfile") or die "Can't open logfile $logfile for writing: $!\n";
open (STDERR, ">>", "$logfile") or die "Can't open logfile $logfile for writing: $!\n";
print "$logfile\n";

##### Now login to sqlplus-----
my $SQLPLUS = '/opt/oracle/product/11g/db_1/bin/sqlplus -S system/coolman7@vsdb';
`$SQLPLUS \@${basePath}/VoucherQuery1.sql $startdate > ${basePath}/QueryResult1.txt`;

open (FH1, "${basePath}/QueryResult1.txt");
while (my $serial = <FH1>) {
    chomp ($serial);
    my $count = `$SQLPLUS \@${basePath}/VoucherQuery2.sql $serial $startdate`;
    chomp ($count);
    $count =~ s/\s+//g;
    #print "$count\n";
    next if $count == 1;

    `$SQLPLUS \@${basePath}/VoucherQuery3.sql $serial $startdate > ${basePath}/QueryResult3.txt`;
    # print "select * from sample where SERIALNUMBER = $serial----\n";
    open (FH3, "${basePath}/QueryResult3.txt");
    my ($serial_number, $state, $at, $operator_id);
    my $count1 = 0;
    my $old_state;
    while (my $data = <FH3>) {
        chomp ($data);
        #print $data."\n";
        my @data = split (/\s+/, $data);
        my ($serial_number, $state, $at, $operator_id) = @data[0..3];
        $count1++;
        if ($count1 == 1) {
            $old_state = $data[1];
            next;
        }
        my ($date, $time) = split (/T/, $at);
        $date =~ s/(\d{4})(\d{2})(\d{2})/$1-$2-$3/;
        print WFH "$serial_number,$date,$time,$old_state,$state,$operator_id\n";
        $old_state = $data[1];
    }
}
close(WFH);
close(WFH);
query in VoucherQuery1.sql:
select distinct SERIALNUMBER from sample where AT like '&1';
query in VoucherQuery2.sql:
select count(*) from sample where SERIALNUMBER = '&1' and AT like '&2';
query in VoucherQuery3.sql:
select * from sample where SERIALNUMBER = '&1' and AT like '&2';
and my sample output:
VoucherSerialNumber,Date,Time,OldState,NewState,UserId
51223344558857,2008-10-08,10:50:49,Available,Damaged,vsowner
51223344558899,2008-10-08,10:20:25,Available,Used,vsuser
51223344558899,2008-10-08,10:50:25,Used,Damaged,vsuser
The script is working pretty fine, but the problem is that the actual db table has millions of records for a specific day, so it is raising performance issues. Could you please advise how I can improve the efficiency of this script in terms of time and load? The only restriction is that I can't use the DBI module for this.
Also, in case of any error in the SQL queries, the error message goes into the QueryResult?.txt files. I want to handle these errors and capture them in my log file. How can this be accomplished? Thanks.
I think you need to tune your query. A good starting point is to use the EXPLAIN PLAN, if it is an Oracle database.

See all of the unique IDs related to a vmware virtual machine

I want to see all of the unique IDs that are specific to a virtual machine, such as:
hardware ID, CPU ID, UUID, MAC address, etc.
Could anybody please help me find these IDs?
I can help you find some of these; for the rest you will have to search the docs.
Install pyVmomi and run the following code.
EDIT: Changed the code to run against an ESX host directly. Simply run it with python .py
Now, to understand how this code works, you have to learn about Managed Objects. For example, here we are working with the Managed Object vm, and this object has many properties listed in the doc. So to retrieve the uuid of a vm we read vm.config.uuid. For the other details you have to go through the VirtualMachine object and see which properties you need.
import sys
import atexit
import time

from pyVmomi import vim, vmodl
from pyVim.connect import Disconnect
from pyVim import connect

inputs = {'esx_ip': '15.22.10.10',
          'esx_password': 'Password123',
          'esx_user': 'root',
          'vm_name': 'ubuntu',
          }


def wait_for_task(task, actionName='job', hideResult=False):
    """
    Waits and provides updates on a vSphere task
    """
    while task.info.state == vim.TaskInfo.State.running:
        time.sleep(2)

    if task.info.state == vim.TaskInfo.State.success:
        if task.info.result is not None and not hideResult:
            out = '%s completed successfully, result: %s' % (actionName, task.info.result)
            print out
        else:
            out = '%s completed successfully.' % actionName
            print out
    else:
        out = '%s did not complete successfully: %s' % (actionName, task.info.error)
        print out
        raise task.info.error

    return task.info.result


def get_obj(content, vimtype, name):
    """
    Get the vsphere object associated with a given text name
    """
    obj = None
    container = content.viewManager.CreateContainerView(content.rootFolder, vimtype, True)
    for c in container.view:
        if c.name == name:
            obj = c
            break
    return obj


def main():
    si = None
    try:
        print "Trying to connect ..."
        si = connect.Connect(inputs['esx_ip'], 443, inputs['esx_user'], inputs['esx_password'])
    except IOError, e:
        pass

    if not si:
        print "Cannot connect to specified host using specified username and password"
        sys.exit()

    print "Connected to the host!"
    atexit.register(Disconnect, si)

    content = si.RetrieveContent()

    # Get the VirtualMachine Object
    vm = get_obj(content, [vim.VirtualMachine], inputs['vm_name'])

    print "GuestID: ", vm.config.guestId
    print "UUID: ", vm.config.uuid
    print "Version: ", vm.config.version

    for device in vm.config.hardware.device:
        if isinstance(device, vim.vm.device.VirtualEthernetCard):
            print "MAC Address: ", device.macAddress

    # Example of changing UUID:
    new_uuid = '423ffff0-5d62-d040-248c-4538ae2c734f'
    vmconf = vim.vm.ConfigSpec()
    vmconf.uuid = new_uuid
    task = vm.ReconfigVM_Task(vmconf)
    wait_for_task(task, si)

    print "Successfully changed UUID"
    print "New UUID: ", vm.config.uuid


if __name__ == "__main__":
    main()
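A few more identifiers can be read off the same VirtualMachine object. The sketch below is only a hint: the property names (config.instanceUuid, config.hardware.numCPU, config.hardware.memoryMB, the _moId attribute) come from the pyVmomi/vSphere documentation, so verify them against your vSphere version. It assumes a vm object obtained the same way as in main() above.
def print_more_ids(vm):
    # vCenter/host-assigned instance UUID (different from the BIOS UUID printed above)
    print "Instance UUID: ", vm.config.instanceUuid
    # basic virtual hardware identifiers
    print "Num CPUs: ", vm.config.hardware.numCPU
    print "Memory MB: ", vm.config.hardware.memoryMB
    # the managed object reference id of the VM itself
    print "MoRef: ", vm._moId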

How to delete or purge old files on S3?

Are there existing solutions to delete any files older than x days?
Amazon has introduced object expiration recently.
Amazon S3 Announces Object Expiration
Amazon S3 announced a new
feature, Object Expiration that allows you to schedule the deletion of
your objects after a pre-defined time period. Using Object Expiration
to schedule periodic removal of objects eliminates the need for you
to identify objects for deletion and submit delete requests to Amazon
S3.
You can define Object Expiration rules for a set of objects in
your bucket. Each Object Expiration rule allows you to specify a
prefix and an expiration period in days. The prefix field (e.g.
logs/) identifies the object(s) subject to the expiration rule, and
the expiration period specifies the number of days from creation date
(i.e. age) after which object(s) should be removed. Once the objects
are past their expiration date, they will be queued for deletion. You
will not be billed for storage for objects on or after their
expiration date.
Here is some info on how to do it...
http://docs.amazonwebservices.com/AmazonS3/latest/dev/ObjectExpiration.html
Hope this helps.
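If you prefer to set such an expiration rule from code rather than the console, here is a rough boto3 sketch (the bucket name and prefix are made up for the example; see the boto3 docs for put_bucket_lifecycle_configuration):
import boto3

s3 = boto3.client('s3')
s3.put_bucket_lifecycle_configuration(
    Bucket='my-bucket',                      # hypothetical bucket name
    LifecycleConfiguration={
        'Rules': [{
            'ID': 'expire-old-logs',
            'Filter': {'Prefix': 'logs/'},   # only objects under logs/
            'Status': 'Enabled',
            'Expiration': {'Days': 30},      # delete objects 30 days after creation
        }]
    }
)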
Here is how to implement it using a CloudFormation template:
JenkinsArtifactsBucket:
  Type: "AWS::S3::Bucket"
  Properties:
    BucketName: !Sub "jenkins-artifacts"
    LifecycleConfiguration:
      Rules:
        - Id: "remove-old-artifacts"
          ExpirationInDays: 3
          NoncurrentVersionExpirationInDays: 3
          Status: Enabled
This creates a lifecycle rule as explained by @Ravi Bhatt.
Read more on that:
https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-lifecycleconfig-rule.html
How object lifecycle management works:
https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html
You can use AWS S3 lifecycle rules to expire files and delete them. All you have to do is select the bucket, click on the "Add lifecycle rules" button and configure it, and AWS will take care of them for you.
You can refer to the blog post below from Joe for step-by-step instructions. It's quite simple actually:
https://www.joe0.com/2017/05/24/amazon-s3-how-to-delete-files-older-than-x-days/
Hope it helps!
Here is a Python script to delete files older than N days:
from boto3 import client, Session
from botocore.exceptions import ClientError
from datetime import datetime, timezone
import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--access_key_id', required=True)
    parser.add_argument('--secret_access_key', required=True)
    parser.add_argument('--delete_after_retention_days', required=False, default=15)
    parser.add_argument('--bucket', required=True)
    parser.add_argument('--prefix', required=False, default="")
    parser.add_argument('--endpoint', required=True)

    args = parser.parse_args()

    access_key_id = args.access_key_id
    secret_access_key = args.secret_access_key
    delete_after_retention_days = int(args.delete_after_retention_days)
    bucket = args.bucket
    prefix = args.prefix
    endpoint = args.endpoint

    # get current date
    today = datetime.now(timezone.utc)

    try:
        # create a connection to Wasabi
        s3_client = client(
            's3',
            endpoint_url=endpoint,
            aws_access_key_id=access_key_id,
            aws_secret_access_key=secret_access_key)
    except Exception as e:
        raise e

    try:
        # list all the buckets under the account
        list_buckets = s3_client.list_buckets()
    except ClientError:
        # invalid access keys
        raise Exception("Invalid Access or Secret key")

    # create a paginator for all objects.
    object_response_paginator = s3_client.get_paginator('list_object_versions')
    if len(prefix) > 0:
        operation_parameters = {'Bucket': bucket,
                                'Prefix': prefix}
    else:
        operation_parameters = {'Bucket': bucket}

    # instantiate temp variables.
    delete_list = []
    count_current = 0
    count_non_current = 0

    print("$ Paginating bucket " + bucket)
    for object_response_itr in object_response_paginator.paginate(**operation_parameters):
        for version in object_response_itr['Versions']:
            if version["IsLatest"] is True:
                count_current += 1
            elif version["IsLatest"] is False:
                count_non_current += 1
            if (today - version['LastModified']).days > delete_after_retention_days:
                delete_list.append({'Key': version['Key'], 'VersionId': version['VersionId']})

    # print objects count
    print("-" * 20)
    print("$ Before deleting objects")
    print("$ current objects: " + str(count_current))
    print("$ non-current objects: " + str(count_non_current))
    print("-" * 20)

    # delete objects 1000 at a time
    print("$ Deleting objects from bucket " + bucket)
    for i in range(0, len(delete_list), 1000):
        response = s3_client.delete_objects(
            Bucket=bucket,
            Delete={
                'Objects': delete_list[i:i + 1000],
                'Quiet': True
            }
        )
        print(response)

    # reset counts
    count_current = 0
    count_non_current = 0

    # paginate and recount
    print("$ Paginating bucket " + bucket)
    for object_response_itr in object_response_paginator.paginate(Bucket=bucket):
        if 'Versions' in object_response_itr:
            for version in object_response_itr['Versions']:
                if version["IsLatest"] is True:
                    count_current += 1
                elif version["IsLatest"] is False:
                    count_non_current += 1

    # print objects count
    print("-" * 20)
    print("$ After deleting objects")
    print("$ current objects: " + str(count_current))
    print("$ non-current objects: " + str(count_non_current))
    print("-" * 20)
    print("$ task complete")
And here is how I run it:
python s3_cleanup.py --access_key_id="access-key" --secret_access_key="secret-key-here" --endpoint="https://s3.us-west-1.wasabisys.com" --bucket="ondemand-downloads" --prefix="" --delete_after_retention_days=5
If you want to delete files only from a specific folder, use the prefix parameter.
You can use the following PowerShell script to delete objects older than x days.
[CmdletBinding()]
Param(
    [Parameter(Mandatory=$True)]
    [string]$BUCKET_NAME, #Name of the Bucket
    [Parameter(Mandatory=$True)]
    [string]$OBJ_PATH, #Key prefix of s3 object (directory path)
    [Parameter(Mandatory=$True)]
    [string]$EXPIRY_DAYS #Number of days to expire
)

$CURRENT_DATE = Get-Date
$OBJECTS = Get-S3Object $BUCKET_NAME -KeyPrefix $OBJ_PATH
Foreach($OBJ in $OBJECTS){
    IF($OBJ.key -ne $OBJ_PATH){
        IF(($CURRENT_DATE - $OBJ.LastModified).Days -gt $EXPIRY_DAYS){
            Write-Host "Deleting Object= " $OBJ.key
            Remove-S3Object -BucketName $BUCKET_NAME -Key $OBJ.Key -Force
        }
    }
}

redis move all keys

Is it possible to use Redis's MOVE command to move all keys from one database to another? The MOVE command only moves one key, but I need to move all the keys in the database.
I would recommend taking a look at the following alpha-version app to backup and restore redis databases (you can install it via gem install redis-dump). You could redis-dump your database and then redis-load it into another database via the --database argument.
redis-dump project
If this doesn't fit your purposes, you may need to make use of a scripting language's redis bindings (or alternatively throw something together using bash / redis-cli / xargs, etc.). If you need assistance along these lines then we probably need more details first.
I've written a small Python script to move data between two redis servers (it only supports list and string types, and you must install the Python redis client):
'''
Created on 2011-11-9
@author: wuyi
'''
import redis
from optparse import OptionParser
import time


def mv_str(r_source, r_dest, quiet):
    keys = r_source.keys("*")
    for k in keys:
        if r_dest.keys(k):
            print "skipping %s" % k
            continue
        else:
            print "copying %s" % k
            r_dest.set(k, r_source.get(k))


def mv_list(r_source, r_dest, quiet):
    keys = r_source.keys("*")
    for k in keys:
        length = r_source.llen(k)
        i = 0
        while (i < length):
            print "add queue no.:%d" % i
            v = r_source.lindex(k, i)
            r_dest.rpush(k, v)
            i += 1


if __name__ == "__main__":
    usage = """usage: %prog [options] source dest"""
    parser = OptionParser(usage=usage)
    parser.add_option("-q", "--quiet", dest="quiet",
                      default=False, action="store_true",
                      help="quiet mode")
    parser.add_option("-p", "--port", dest="port",
                      default=6380,
                      help="port for both source and dest")
    parser.add_option("", "--dbs", dest="dbs",
                      default="0",
                      help="db list: 0 1 120 220...")
    parser.add_option("-t", "--type", dest="type",
                      default="normal",
                      help="available types: normal, lpoplist")
    parser.add_option("", "--tmpdb", dest="tmpdb",
                      default=0,
                      help="tmp db number to store tmp data")

    (options, args) = parser.parse_args()
    if not len(args) == 2:
        print usage
        exit(1)

    source = args[0]
    dest = args[1]
    if source == dest:
        print "dest must not be the same as source!"
        exit(2)

    dbs = options.dbs.split(' ')
    for db in dbs:
        r_source = redis.Redis(host=source, db=db, password="", port=int(options.port))
        r_dest = redis.Redis(host=dest, db=db, password="", port=int(options.port))
        print "______________db____________:%s" % db
        time.sleep(2)
        if options.type == "normal":
            mv_str(r_source, r_dest, options.quiet)
        elif options.type == "lpoplist":
            mv_list(r_source, r_dest, options.quiet)
        del r_source
        del r_dest
You can try my own tool, rdd.
It's a command line utility that can dump a database to a file, work on it (filter, match, merge, ...), and load it back into a redis instance.
Take care, it's at an alpha stage: https://github.com/r043v/rdd/
Now that Redis has scripting using Lua, you can easily write a command that loops through all the keys, checks their type and moves them to a new database accordingly.
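If you would rather stay in Python than write Lua, a rough sketch of the same idea using redis-py's SCAN iteration plus the MOVE command (the host, port and database numbers are just examples; MOVE only works between databases of the same Redis instance):
import redis

r = redis.Redis(host='localhost', port=6379, db=0)   # source database 0
for key in r.scan_iter():           # SCAN-based iteration, safe on large keyspaces
    # MOVE returns False if the key already exists in the target database
    if not r.move(key, 1):          # destination database 1
        print("could not move %r" % key)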
I suggest you try it as below:
1. Copy the rdb file to another dir;
2. Modify the rdb file name;
3. Modify the redis configuration file to point at the new db.