Create a data source for WebLogic 12.2.1.3 in offline mode

I want to create a WebLogic data source using WLST in offline mode, and I get an error when I try to create the Properties element:
create('my_Prop','Properties')
Here is the entire script:
readDomain('C:\\weblogic12213\\user_projects\\domains\\myDomain')
cd('/')
create('myJDBC', 'JDBCSystemResource')
cd('/JDBCSystemResource/myJDBC')
set('Target','myApp')
cd('/JDBCSystemResource/myJDBC/JdbcResource/myJDBC')
cmo.setName('myJDBC')
create('myJDBC','JDBCDataSourceParams')
cd('JDBCDataSourceParams/myJDBC')
set('JNDIName', java.lang.String('jdbc.myJDBC'))
set('GlobalTransactionsProtocol', java.lang.String('OnePhaseCommit'))
cd('/JDBCSystemResource/myJDBC/JdbcResource/myJDBC')
create('myJDBC','JDBCDriverParams')
cd('JDBCDriverParams/myJDBC')
set('DriverName','weblogic.jdbc.sqlserver.SQLServerDriver')
set('URL','jdbc:weblogic:sqlserver://localhost:1433;allowPortWithNamedInstance=true')
set('PasswordEncrypted', 'myPassword')
set('UseXADataSourceInterface', 'false')
create('my_Prop','Properties')
cd('Properties/myJDBC')
create('user','Property')
cd('Property/user')
set('Value', 'myUser')
cd('/JDBCSystemResource/myJDBC/JdbcResource/myJDBC')
create('myJDBC','JDBCConnectionPoolParams')
cd('JDBCConnectionPoolParams/myJDBC')
set('TestTableName','SQL SELECT 1')
updateDomain()
closeDomain()
exit()
This error appears:
com.oracle.cie.domain.script.jython.WLSTException: Could not create generic operation:Properties
at com.oracle.cie.domain.operation.OperationBuilder.createConfigOperation(OperationBuilder.java:342)
at com.oracle.cie.domain.script.jython.CommandExceptionHandler.handleException(CommandExceptionHandler.java:69)
at com.oracle.cie.domain.script.jython.WLScriptContext.handleException(WLScriptContext.java:2983)
Does anybody have any idea please?

I suppose you already found the solution. This works for me without errors.
# cd into the already created driver params
cd('/JDBCSystemResource/myJDBC/JdbcResource/myJDBC/JDBCDriverParams/NO_NAME_0')
create('properties','Properties')
cd('Properties/NO_NAME_0')
create('property','Property')
cd('Property/property')
set("Key", "key")
set("Value", "value")

"""
This script configures a JDBC data source as a System Module and deploys it
to the server
"""
import sys

url = 't3://' + sys.argv[1] + ':' + sys.argv[2]
username = sys.argv[3]
password = sys.argv[4]
connect(username,password,url)
edit()
# Change these names as necessary
dsname="myJDBCDataSource"
server=sys.argv[5]
cd("Servers/"+server)
target=cmo
cd("../..")
startEdit()
# start creation
print 'Creating JDBCSystemResource with name '+dsname
jdbcSR = create(dsname,"JDBCSystemResource")
theJDBCResource = jdbcSR.getJDBCResource()
theJDBCResource.setName("myJDBCDataSource")
connectionPoolParams = theJDBCResource.getJDBCConnectionPoolParams()
connectionPoolParams.setConnectionReserveTimeoutSeconds(25)
connectionPoolParams.setMaxCapacity(100)
connectionPoolParams.setTestTableName("SYSTABLES")
dsParams = theJDBCResource.getJDBCDataSourceParams()
dsParams.addJNDIName("ds.myJDBCDataSource")
driverParams = theJDBCResource.getJDBCDriverParams()
driverParams.setUrl("jdbc:derby://localhost:1527/examples;create=true")
driverParams.setDriverName("org.apache.derby.jdbc.ClientXADataSource")
# driverParams.setUrl("jdbc:oracle:thin:@my-oracle-server:my-oracle-server-port:my-oracle-sid")
# driverParams.setDriverName("oracle.jdbc.driver.OracleDriver")
driverParams.setPassword("examples")
# driverParams.setLoginDelaySeconds(60)
driverProperties = driverParams.getProperties()
proper = driverProperties.createProperty("user")
#proper.setName("user")
proper.setValue("examples")
proper1 = driverProperties.createProperty("DatabaseName")
#proper1.setName("DatabaseName")
proper1.setValue("examples")
jdbcSR.addTarget(target)
save()
activate(block="true")
print 'Done configuring the data source'
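For reference, a script like this is typically launched through WLST with the positional arguments it reads from sys.argv; the file name and values below are placeholders:
java weblogic.WLST create_ds.py adminhost 7001 weblogic welcome1 AdminServer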


How to create a fake StringSession for unit tests

I've got some code which uses StringSession to talk to the Telegram API using telethon.
In my unit tests, I'm trying to instantiate a mocked TelegramClient, passing it a StringSession(myvalue) object as the first parameter. The real code works fine, but I need a fake session string for 'myvalue', to use in my unit tests (where I have a mocked telegram client).
How can I create a dummy value for 'myvalue' which will successfully execute StringSession(myvalue)?
Currently, my tests are dying here:
self = <telethon.sessions.string.StringSession object at 0x7f0777492ad0>
string = 'dummyxxx'

    def __init__(self, string: str = None):
        super().__init__()
        if string:
            if string[0] != CURRENT_VERSION:
                raise ValueError('Not a valid string')
            string = string[1:]
            ip_len = 4 if len(string) == 352 else 16
>           self._dc_id, ip, self._port, key = struct.unpack(
                _STRUCT_PREFORMAT.format(ip_len), StringSession.decode(string))
E           struct.error: unpack requires a buffer of 275 bytes
If you don't need a valid session to start with, you can also use MemorySession instead:
from telethon.sessions import MemorySession
session = MemorySession()
# use session variable when creating the client
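For instance, a minimal sketch of the client construction (api_id and api_hash are placeholders for your own credentials):
from telethon import TelegramClient
from telethon.sessions import MemorySession

# placeholder credentials; substitute your own api_id/api_hash
client = TelegramClient(MemorySession(), api_id=12345, api_hash='0123456789abcdef0123456789abcdef')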
Someone posted an answer which helped point me in the right direction, but they later deleted it for some reason.
In case it helps anyone else, here is the code that worked for me:
import struct
import base64
from telethon.sessions import StringSession
_STRUCT_PREFORMAT = '>B{}sH256s'
CURRENT_VERSION = '1'
dc_id = 1
ip = b'\x7f\x00\x00\x01' # 127.0.0.1
port = 80
key = b'\x00' * 256
string = StringSession.encode(struct.pack(
    _STRUCT_PREFORMAT.format(len(ip)),
    dc_id,
    ip,
    port,
    key
))
myvalue = CURRENT_VERSION + string
# Create the StringSession object using the dummy value to confirm it works
session = StringSession(myvalue)
print(myvalue)
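Continuing from the snippet above, the dummy string can then be handed to the client under test, e.g. (again with placeholder credentials):
from telethon import TelegramClient

# placeholder credentials for the mocked client
client = TelegramClient(StringSession(myvalue), api_id=12345, api_hash='0123456789abcdef0123456789abcdef')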

Dropbox - automatic refresh token using OAuth 2.0 with offline access

I know: automatic token refreshing is not a new topic.
This is the use case that generates my problem: let's say we want to extract data from Dropbox. You can find the code below; the first time, it works perfectly: 1) the user goes to the generated link; 2) after allowing the app, the user copies and pastes the authorization code into the input box.
The problem arises when, some hours later, the user wants to do the same operation. How can I avoid or bypass generating a new authorization code and go straight to the operation?
As you can see in the code, within a short period it is possible to re-inject the auth code (commented out in the code). But after an hour or more this is no longer possible.
Any help is welcome.
#!/usr/bin/env python3
import dropbox
from dropbox import DropboxOAuth2FlowNoRedirect
import pandas as pd

'''
Populate your app key in order to run this locally
'''
APP_KEY = ""

auth_flow = DropboxOAuth2FlowNoRedirect(APP_KEY, use_pkce=True, token_access_type='offline')

target = '/DVR/DVR/'

authorize_url = auth_flow.start()
print("1. Go to: " + authorize_url)
print("2. Click \"Allow\" (you might have to log in first).")
print("3. Copy the authorization code.")
auth_code = input("Enter the authorization code here: ").strip()
#auth_code="3NIcPps_UxAAAAAAAAAEin1sp5jUjrErQ6787_RUbJU"

try:
    oauth_result = auth_flow.finish(auth_code)
except Exception as e:
    print('Error: %s' % (e,))
    exit(1)

with dropbox.Dropbox(oauth2_refresh_token=oauth_result.refresh_token, app_key=APP_KEY) as dbx:
    dbx.users_get_current_account()
    print("Successfully set up client!")
    for entry in dbx.files_list_folder(target).entries:
        print(entry.name)

    # function to get the list of files in a folder
    def dropbox_list_files(path):
        try:
            files = dbx.files_list_folder(path).entries
            files_list = []
            for file in files:
                if isinstance(file, dropbox.files.FileMetadata):
                    metadata = {
                        'name': file.name,
                        'path_display': file.path_display,
                        'client_modified': file.client_modified,
                        'server_modified': file.server_modified
                    }
                    files_list.append(metadata)
            df = pd.DataFrame.from_records(files_list)
            return df.sort_values(by='server_modified', ascending=False)
        except Exception as e:
            print('Error getting list of files from Dropbox: ' + str(e))

    def create_links(target, csvfile):
        filesList = []
        print("creating links for folder " + target)
        files = dbx.files_list_folder('/' + target)
        filesList.extend(files.entries)
        print(len(files.entries))
        while files.has_more:
            files = dbx.files_list_folder_continue(files.cursor)
            filesList.extend(files.entries)
            print(len(files.entries))
        for file in filesList:
            if isinstance(file, dropbox.files.FileMetadata):
                filename = file.name + ',' + file.path_display + ',' + str(file.size) + ','
                link_data = dbx.sharing_create_shared_link(file.path_lower)
                filename += link_data.url + '\n'
                csvfile.write(filename)
                print(file.name)
            else:
                create_links(target + '/' + file.name, csvfile)

    # create links for all files in the folder belgeler
    create_links(target, open('links.csv', 'w', encoding='utf-8'))

    listing = dbx.files_list_folder(target)
    # todo: add implementation for files_list_folder_continue
    for entry in listing.entries:
        if entry.name.endswith(".pdf"):
            # note: this simple implementation only works for files in the root of the folder
            res = dbx.sharing_get_shared_links(target + entry.name)
            #f.write(res.content)
            print('\r', res)
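One way to go straight to the operation on later runs is to persist the refresh token from the first run and build the client from it directly; a minimal sketch, assuming the token is kept in a local file named refresh_token.txt (a name chosen here for illustration):
import os
import dropbox
from dropbox import DropboxOAuth2FlowNoRedirect

APP_KEY = ""
TOKEN_FILE = "refresh_token.txt"  # hypothetical location for the saved token

if os.path.exists(TOKEN_FILE):
    # Later runs: reuse the saved refresh token; no authorization code needed.
    with open(TOKEN_FILE) as f:
        refresh_token = f.read().strip()
else:
    # First run: do the one-time authorization flow and save the refresh token.
    auth_flow = DropboxOAuth2FlowNoRedirect(APP_KEY, use_pkce=True, token_access_type='offline')
    print("1. Go to: " + auth_flow.start())
    auth_code = input("Enter the authorization code here: ").strip()
    refresh_token = auth_flow.finish(auth_code).refresh_token
    with open(TOKEN_FILE, "w") as f:
        f.write(refresh_token)

# The SDK refreshes the short-lived access token automatically from here on.
with dropbox.Dropbox(oauth2_refresh_token=refresh_token, app_key=APP_KEY) as dbx:
    print(dbx.users_get_current_account())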

Error: "Input array is longer than the number of columns in this table" (PowerShell)

I am trying to load a 160 GB CSV file into SQL Server using a PowerShell script I got from GitHub, and I get this error:
Exception calling "Add" with "1" argument(s): "Input array is longer than the number of columns in this table."
At C:\b.ps1:54 char:26
+ [void]$datatable.Rows.Add <<<< ($line.Split($delimiter))
+ CategoryInfo : NotSpecified: (:) [], MethodInvocationException
+ FullyQualifiedErrorId : DotNetMethodException
So I checked the same code with a small 3-line CSV: all of the columns match, the first row is a header, and there are no extra delimiters, so I'm not sure why I am getting this error.
The code is below:
<# 8-faster-runspaces.ps1 #>
# Set CSV attributes
$csv = "M:\d\s.txt"
$delimiter = "`t"

# Set connstring
$connstring = "Data Source=.;Integrated Security=true;Initial Catalog=PresentationOptimized;PACKET SIZE=32767;"

# Set batchsize to 2000
$batchsize = 2000

# Create the datatable
$datatable = New-Object System.Data.DataTable

# Add generic columns
$columns = (Get-Content $csv -First 1).Split($delimiter)
foreach ($column in $columns) {
    [void]$datatable.Columns.Add()
}

# Setup runspace pool and the scriptblock that runs inside each runspace
$pool = [RunspaceFactory]::CreateRunspacePool(1,5)
$pool.ApartmentState = "MTA"
$pool.Open()
$runspaces = @()

# Setup scriptblock. This is the workhorse. Think of it as a function.
$scriptblock = {
    Param (
        [string]$connstring,
        [object]$dtbatch,
        [int]$batchsize
    )
    $bulkcopy = New-Object Data.SqlClient.SqlBulkCopy($connstring,"TableLock")
    $bulkcopy.DestinationTableName = "abc"
    $bulkcopy.BatchSize = $batchsize
    $bulkcopy.WriteToServer($dtbatch)
    $bulkcopy.Close()
    $dtbatch.Clear()
    $bulkcopy.Dispose()
    $dtbatch.Dispose()
}

# Start timer
$time = [System.Diagnostics.Stopwatch]::StartNew()

# Open the text file from disk and process.
$reader = New-Object System.IO.StreamReader($csv)
Write-Output "Starting insert.."
while ((($line = $reader.ReadLine()) -ne $null))
{
    [void]$datatable.Rows.Add($line.Split($delimiter))
    if ($datatable.rows.count % $batchsize -eq 0)
    {
        $runspace = [PowerShell]::Create()
        [void]$runspace.AddScript($scriptblock)
        [void]$runspace.AddArgument($connstring)
        [void]$runspace.AddArgument($datatable) # <-- Send datatable
        [void]$runspace.AddArgument($batchsize)
        $runspace.RunspacePool = $pool
        $runspaces += [PSCustomObject]@{ Pipe = $runspace; Status = $runspace.BeginInvoke() }
        # Overwrite object with a shell of itself
        $datatable = $datatable.Clone() # <-- Create new datatable object
    }
}

# Close the file
$reader.Close()

# Wait for runspaces to complete
while ($runspaces.Status.IsCompleted -notcontains $true) {}

# End timer
$secs = $time.Elapsed.TotalSeconds

# Cleanup runspaces
foreach ($runspace in $runspaces) {
    [void]$runspace.Pipe.EndInvoke($runspace.Status) # EndInvoke method retrieves the results of the asynchronous call
    $runspace.Pipe.Dispose()
}

# Cleanup runspace pool
$pool.Close()
$pool.Dispose()

# Cleanup SQL Connections
[System.Data.SqlClient.SqlConnection]::ClearAllPools()

# Done! Format output then display
$totalrows = 1000000
$rs = "{0:N0}" -f [int]($totalrows / $secs)
$rm = "{0:N0}" -f [int]($totalrows / $secs * 60)
$mill = "{0:N0}" -f $totalrows
Write-Output "$mill rows imported in $([math]::round($secs,2)) seconds ($rs rows/sec and $rm rows/min)"
Working with a 160 GB input file is going to be a pain. You can't really load it into any kind of editor, or at least you can't analyze such a mass of data without some serious automation.
As per the comments, it seems that the data has some quality issues. To find the offending data, you could try binary searching. This approach shrinks the data fast. Like so:
1) Split the file in about two equal chunks.
2) Try and load first chunk.
3) If successful, process the second chunk. If not, see 6).
4) Try and load second chunk.
5) If successful, the files are valid, but you have a different data quality issue. Start looking into other causes. If not, see 6).
6) If either load failed, start from the beginning and use the failed file as the input file.
7) Repeat until you narrow down the offending row(s).
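If splitting a 160 GB file back and forth is impractical, a quick alternative is to scan the file once and report the rows whose field count differs from the header's; a rough Python sketch, assuming the tab-delimited file from the question:
# Report lines whose column count differs from the header's.
path = r"M:\d\s.txt"   # path taken from the question's script
delimiter = "\t"

with open(path, "r", encoding="utf-8", errors="replace") as f:
    expected = len(f.readline().rstrip("\n").split(delimiter))
    for lineno, line in enumerate(f, start=2):
        fields = len(line.rstrip("\n").split(delimiter))
        if fields != expected:
            print("line %d: %d fields (expected %d)" % (lineno, fields, expected))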
Another method would be to use an ETL tool like SSIS. Configure the package to redirect invalid rows into an error log to see which data is not working properly.

See all of the unique IDs related to a VMware virtual machine

I want to see all of the unique IDs that are specific to a virtual machine, such as:
hardware ID, CPU ID, UUID, MAC address, etc.
Could anybody please help me find these IDs?
I can help you find some of these. For the rest you will have to search the doc.
Install pyVmomi and run the following code.
EDIT: Changed the code to run on an ESX host. Simply run it with: python <your-script>.py
Now, to understand how this code works, you have to learn about Managed Objects. For example, here we are working with the Managed Object vm, and this object has many properties listed in the doc. So to retrieve the uuid of a vm we invoke vm.config.uuid. For the other details, go through the VirtualMachine object and see which properties you need.
import sys
import atexit
import time

from pyVmomi import vim, vmodl
from pyVim.connect import Disconnect
from pyVim import connect

inputs = {'esx_ip': '15.22.10.10',
          'esx_password': 'Password123',
          'esx_user': 'root',
          'vm_name': 'ubuntu',
          }

def wait_for_task(task, actionName='job', hideResult=False):
    """
    Waits and provides updates on a vSphere task
    """
    while task.info.state == vim.TaskInfo.State.running:
        time.sleep(2)
    if task.info.state == vim.TaskInfo.State.success:
        if task.info.result is not None and not hideResult:
            out = '%s completed successfully, result: %s' % (actionName, task.info.result)
            print out
        else:
            out = '%s completed successfully.' % actionName
            print out
    else:
        out = '%s did not complete successfully: %s' % (actionName, task.info.error)
        print out
        raise task.info.error
    return task.info.result

def get_obj(content, vimtype, name):
    """
    Get the vsphere object associated with a given text name
    """
    obj = None
    container = content.viewManager.CreateContainerView(content.rootFolder, vimtype, True)
    for c in container.view:
        if c.name == name:
            obj = c
            break
    return obj

def main():
    si = None
    try:
        print "Trying to connect ..."
        si = connect.Connect(inputs['esx_ip'], 443, inputs['esx_user'], inputs['esx_password'])
    except IOError, e:
        pass
    if not si:
        print "Cannot connect to specified host using specified username and password"
        sys.exit()
    print "Connected to the host!"
    atexit.register(Disconnect, si)
    content = si.RetrieveContent()

    # Get the VirtualMachine Object
    vm = get_obj(content, [vim.VirtualMachine], inputs['vm_name'])

    print "GuestID: ", vm.config.guestId
    print "UUID: ", vm.config.uuid
    print "Version: ", vm.config.version
    for device in vm.config.hardware.device:
        if isinstance(device, vim.vm.device.VirtualEthernetCard):
            print "MAC Address: ", device.macAddress

    # Example of changing UUID:
    new_uuid = '423ffff0-5d62-d040-248c-4538ae2c734f'
    vmconf = vim.vm.ConfigSpec()
    vmconf.uuid = new_uuid
    task = vm.ReconfigVM_Task(vmconf)
    wait_for_task(task)
    print "Successfully changed UUID"
    print "New UUID: ", vm.config.uuid

if __name__ == "__main__":
    main()
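If you also need other identifiers, the same VirtualMachine object exposes a few more; a sketch of lines that could be added inside main() after vm is fetched (field availability varies by vSphere version, so treat these as assumptions to verify against the doc):
# Sketch: additional identifiers, printed inside main() after 'vm' is fetched
print "Instance UUID: ", vm.config.instanceUuid  # vCenter-assigned instance UUID
print "Managed object ID: ", vm._moId            # internal MoRef id, e.g. 'vm-123'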

Redis: move all keys

Is it possible to use Redis's MOVE command to move all keys from one database to another? The MOVE command only moves one key, but I need to move all the keys in the database.
I would recommend taking a look at the following alpha-version app to back up and restore Redis databases (you can install it via gem install redis-dump). You could redis-dump your database and then redis-load it into another database via the --database argument.
redis-dump project
If this doesn't fit your purposes, you may need to make use of a scripting language's Redis bindings (or alternatively throw something together using bash / redis-cli / xargs, etc.). If you need assistance along those lines, we probably need more details first.
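For example, with the Python bindings (redis-py), MOVE itself can be driven from a few lines when the two databases live on the same server; a minimal sketch, with host, port, and db numbers as placeholders:
import redis

r = redis.Redis(host='localhost', port=6379, db=0)
for key in r.keys('*'):
    r.move(key, 1)  # MOVE each key from db 0 into db 1 on the same server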
I've written a small Python script to move data between two Redis servers (it only supports list and string types, and you must install the Python redis client):
'''
Created on 2011-11-9
@author: wuyi
'''
import redis
from optparse import OptionParser
import time

def mv_str(r_source, r_dest, quiet):
    keys = r_source.keys("*")
    for k in keys:
        if r_dest.keys(k):
            print "skipping %s" % k
            continue
        else:
            print "copying %s" % k
            r_dest.set(k, r_source.get(k))

def mv_list(r_source, r_dest, quiet):
    keys = r_source.keys("*")
    for k in keys:
        length = r_source.llen(k)
        i = 0
        while (i < length):
            print "add queue no.:%d" % i
            v = r_source.lindex(k, i)
            r_dest.rpush(k, v)
            i += 1

if __name__ == "__main__":
    usage = """usage: %prog [options] source dest"""
    parser = OptionParser(usage=usage)
    parser.add_option("-q", "--quiet", dest="quiet",
                      default=False, action="store_true",
                      help="quiet mode")
    parser.add_option("-p", "--port", dest="port",
                      default=6380,
                      help="port for both source and dest")
    parser.add_option("", "--dbs", dest="dbs",
                      default="0",
                      help="db list: 0 1 120 220...")
    parser.add_option("-t", "--type", dest="type",
                      default="normal",
                      help="available types: normal, lpoplist")
    parser.add_option("", "--tmpdb", dest="tmpdb",
                      default=0,
                      help="tmp db number to store tmp data")
    (options, args) = parser.parse_args()
    if not len(args) == 2:
        print usage
        exit(1)
    source = args[0]
    dest = args[1]
    if source == dest:
        print "dest must not be the same as source!"
        exit(2)
    dbs = options.dbs.split(' ')
    for db in dbs:
        r_source = redis.Redis(host=source, db=db, password="", port=int(options.port))
        r_dest = redis.Redis(host=dest, db=db, password="", port=int(options.port))
        print "______________db____________:%s" % db
        time.sleep(2)
        if options.type == "normal":
            mv_str(r_source, r_dest, options.quiet)
        elif options.type == "lpoplist":
            mv_list(r_source, r_dest, options.quiet)
        del r_source
        del r_dest
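Invocation would look something like this (the script file name here is hypothetical):
python move_redis.py -p 6379 --dbs "0 1" -t normal source-host dest-host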
You can try my own tool, rdd.
It's a command-line utility that can dump a database to a file, work on it (filter, match, merge, ...), and load it back into a Redis instance.
Take care: it's alpha stage. https://github.com/r043v/rdd/
Now that Redis has scripting using Lua, you can easily write a command that loops through all the keys, checks their type, and moves them accordingly to a new database.
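For instance, driven through the Python bindings (an untested sketch; since MOVE keeps the value's type intact, no explicit per-type handling is done here):
import redis

r = redis.Redis(host='localhost', port=6379, db=0)
# Run the loop server-side: MOVE every key of the current db into db 1.
script = "for _, k in ipairs(redis.call('keys', '*')) do redis.call('move', k, ARGV[1]) end return 1"
r.eval(script, 0, 1)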
I suggest you try the following:
1. copy the RDB file to another directory;
2. rename the RDB file;
3. modify the Redis configuration file to point at the new dump file.