WebLogic WLST findService NameError

I have the following wlst script:
import wlstModule
from com.bea.wli.sb.management.configuration import SessionManagementMBean
from com.bea.wli.sb.management.configuration import ALSBConfigurationMBean
from com.bea.wli.config import Ref

#=======================================================================================
# Utility function to read a binary file
#=======================================================================================
def readBinaryFile(fileName):
    file = open(fileName, 'rb')
    bytes = file.read()
    return bytes

#=======================================================================================
# Utility function to create an arbitrary session name
#=======================================================================================
def createSessionName():
    sessionName = String("SessionScript"+Long(System.currentTimeMillis()).toString())
    return sessionName

def getSessionManagementMBean(sessionName):
    SessionMBean = findService("SessionManagement", "com.bea.wli.sb.management.configuration.SessionManagementMBean")
    SessionMBean.createSession(sessionName)
    return SessionMBean

SessionMBean = None
importJar = 'C:\\OSB_PROJECT.jar'
theBytes = readBinaryFile(importJar)
sessionName = createSessionName()
SessionMBean = getSessionManagementMBean(sessionName)
The result is an error:
wls:/offline> execfile('C:\script.py')
Traceback (innermost last):
  File "", line 1, in ?
  File "C:\script.py", line 31, in ?
  File "C:\script.py", line 22, in getSessionManagementMBean
NameError: findService
How can I fix this?

Are you ever connecting to your server and accessing the domain runtime? You should be doing something like the following:
connect("weblogic", "weblogic", "t3://localhost:7001")
domainRuntime()
# obtain session management mbean to create a session.
# This mbean instance can be used more than once to
# create/discard/commit many sessions
sessionMBean = findService(SessionManagementMBean.NAME,SessionManagementMBean.TYPE)
See more here:
http://docs.oracle.com/cd/E13171_01/alsb/docs25/javadoc/com/bea/wli/sb/management/configuration/SessionManagementMBean.html
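Putting that together with the original script, a minimal sketch might look like the following. It assumes a local admin server at t3://localhost:7001 and the placeholder weblogic/weblogic credentials; adjust both for your environment.

from java.lang import String, Long, System
from com.bea.wli.sb.management.configuration import SessionManagementMBean

# Connect to the admin server and switch to the domain runtime tree first;
# findService() only exists once you are in the domain runtime.
connect("weblogic", "weblogic", "t3://localhost:7001")
domainRuntime()

def createSessionName():
    return String("SessionScript" + Long(System.currentTimeMillis()).toString())

def getSessionManagementMBean(sessionName):
    # Runs after domainRuntime(), so findService() is defined here
    sessionMBean = findService(SessionManagementMBean.NAME, SessionManagementMBean.TYPE)
    sessionMBean.createSession(sessionName)
    return sessionMBean

sessionName = createSessionName()
sessionMBean = getSessionManagementMBean(sessionName)

From there the rest of the original script (reading the jar and importing it) can reuse sessionMBean as before.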

Related

Error using Python BigQuery API with user authentication

I'm getting an error when querying BigQuery from Python using end-user authentication.
It works successfully with service account authentication, but fails with end-user authentication.
I am essentially following these instructions https://cloud.google.com/docs/authentication/end-user
The error message is:
ProjectId and DatasetId must be non-empty
I am stumped. Using service account authentication returns the expected data, so it appears to be an authentication-related issue, but the authentication step appears to be successful.
Details
from google_auth_oauthlib import flow
from google.cloud import bigquery
appflow = flow.InstalledAppFlow.from_client_secrets_file(
    "client_secrets.json", scopes=["https://www.googleapis.com/auth/bigquery"])
appflow.run_local_server()
credentials = appflow.credentials
client = bigquery.Client(project='MyProject', credentials=credentials)
query_string = """SELECT name, SUM(number) as total
FROM `bigquery-public-data.usa_names.usa_1910_current`
WHERE name = 'William'
GROUP BY name;
"""
query_job = client.query(query_string)
for row in query_job.result():
print("{}: {}".format(row["name"], row["total"]))
gives the following errors:
Please visit this URL to authorize this application: https://accounts.google.com/o/oauth2/auth?response_type=code&client_id=...
Traceback (most recent call last):
File "C:\python\test\bqtest3.py", line 15, in <module>
query_job = client.query(query_string)
File "C:\Users\me\AppData\Local\Programs\Python\Python310\lib\site-packages\google\cloud\bigquery\client.py", line 3331, in query
return _job_helpers.query_jobs_insert(
File "C:\Users\me\AppData\Local\Programs\Python\Python310\lib\site-packages\google\cloud\bigquery\_job_helpers.py", line 114, in query_jobs_insert
future = do_query()
File "C:\Users\me\AppData\Local\Programs\Python\Python310\lib\site-packages\google\cloud\bigquery\_job_helpers.py", line 91, in do_query
query_job._begin(retry=retry, timeout=timeout)
File "C:\Users\me\AppData\Local\Programs\Python\Python310\lib\site-packages\google\cloud\bigquery\job\query.py", line 1298, in _begin
super(QueryJob, self)._begin(client=client, retry=retry, timeout=timeout)
File "C:\Users\me\AppData\Local\Programs\Python\Python310\lib\site-packages\google\cloud\bigquery\job\base.py", line 510, in _begin
api_response = client._call_api(
File "C:\Users\me\AppData\Local\Programs\Python\Python310\lib\site-packages\google\cloud\bigquery\client.py", line 756, in _call_api
return call()
File "C:\Users\me\AppData\Local\Programs\Python\Python310\lib\site-packages\google\api_core\retry.py", line 283, in retry_wrapped_func
return retry_target(
File "C:\Users\me\AppData\Local\Programs\Python\Python310\lib\site-packages\google\api_core\retry.py", line 190, in retry_target
return target()
File "C:\Users\me\AppData\Local\Programs\Python\Python310\lib\site-packages\google\cloud\_http\__init__.py", line 494, in api_request
raise exceptions.from_http_response(response)
google.api_core.exceptions.BadRequest: 400 POST https://bigquery.googleapis.com/bigquery/v2/projects/MyProject/jobs?prettyPrint=false: ProjectId and DatasetId must be non-empty
Location: None
Job ID: 9eba1ce9-971a-4495-825a-728aed28fc98
Please add the Python env shebang to the script, as shown below.
#!/usr/bin/env python
from google_auth_oauthlib import flow
from google.cloud import bigquery
appflow = flow.InstalledAppFlow.from_client_secrets_file("client_secrets.json", scopes=["https://www.googleapis.com/auth/bigquery"])
appflow.run_local_server()
credentials = appflow.credentials
client = bigquery.Client(project='MyProject', credentials=credentials)
query_job = client.query(
    """
    SELECT name, SUM(number) as total
    FROM `bigquery-public-data.usa_names.usa_1910_current`
    WHERE name = 'William'
    GROUP BY name;
    """
)
for row in query_job.result():
    print("{}: {}".format(row["name"], row["total"]))

Unable to use 'read_sql' to call a SQL query class

I am trying to pull results from the database with the following code:
import pandas as pd
import pyodbc

class DataManagement(object):
    def __init__(self, database=None, server=None, trusted_connection=True, database_driver=ODBC_SQL2005_2012, uid=None, pwd=None):
        self.server = server
        self.database = database
        self.uid = uid
        self.pwd = pwd

        # Use default server name if none supplied - assumed to be localhost
        if self.server is None:
            self.server = SERVER
        if self.database is None:
            self.database = DATABASE

        # Use default sql credentials if none provided
        if self.uid is None or self.pwd is None:
            self.uid = DEFAULT_UID
            self.pwd = DEFAULT_PASSWORD

        if trusted_connection:
            self.connectionstring = "DRIVER={0};SERVER={1};DATABASE={2};Trusted_Connection=yes;".format(database_driver, self.server, self.database)
        else:
            self.connectionstring = 'DRIVER={0};SERVER={1};DATABASE={2};UID={3};PWD={4};'.format(database_driver, self.server, self.database, uid, pwd)

        self.connection = pyodbc.connect(self.connectionstring)
        self.cursor = self.connection.cursor()

    def __enter__(self):
        return self

    def __exit__(self, ctx_type, ctx_value, ctx_traceback):
        self.connection.commit()
        self.connection.close()

qq = DataManagement()
sql = ("select * from ***** ")
data_df = pd.read_sql(sql, qq)
I get an error:
Traceback (most recent call last):
File "<ipython-input-94-453876631fe0>", line 3, in <module>
data_df = pd.read_sql(sql, qq)
File "***\AppData\Local\Continuum\anaconda3\lib\site-packages\pandas\io\sql.py", line 380, in read_sql
chunksize=chunksize)
File "***\AppData\Local\Continuum\anaconda3\lib\site-packages\pandas\io\sql.py", line 1468, in read_query
cursor = self.execute(*args)
File "***\AppData\Local\Continuum\anaconda3\lib\site-packages\pandas\io\sql.py", line 1426, in execute
cur = self.con.cursor()
TypeError: 'pyodbc.Cursor' object is not callable
I saw a similar question at TypeError: 'pyodbc.Cursor' object is not callable (Python 3.6), but was unable to get an answer from there.
I got it to work by editing the class from
self.cursor = self.connection.cursor()
into
self.cursor = self.connection.cursor
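This works because pandas calls con.cursor() on whatever object it is given (see cur = self.con.cursor() in the traceback); with the attribute bound to the method rather than to an already-created cursor object, that call succeeds again. An alternative sketch (my suggestion, not from the answer above) is to hand pandas the underlying pyodbc connection directly:

import pandas as pd

qq = DataManagement()                        # class from the question, unchanged
sql = "select * from some_table"             # placeholder table name
data_df = pd.read_sql(sql, qq.connection)    # pass the raw pyodbc connection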

How to write a pickle file to S3, as a result of a luigi Task?

I want to store a pickle file on S3, as a result of a luigi Task. Below is the class that defines the Task:
class CreateItemVocabulariesTask(luigi.Task):
    def __init__(self):
        self.client = S3Client(AwsConfig().aws_access_key_id,
                               AwsConfig().aws_secret_access_key)
        super().__init__()

    def requires(self):
        return [GetItem2VecDataTask()]

    def run(self):
        filename = 'item2vec_results.tsv'
        data = self.client.get('s3://{}/item2vec_results.tsv'.format(AwsConfig().item2vec_path),
                               filename)
        df = pd.read_csv(filename, sep='\t', encoding='latin1')
        unique_users = df['CustomerId'].unique()
        unique_items = df['ProductNumber'].unique()
        item_to_int, int_to_item = utils.create_lookup_tables(unique_items)
        user_to_int, int_to_user = utils.create_lookup_tables(unique_users)

        with self.output()[0].open('wb') as out_file:
            pickle.dump(item_to_int, out_file)
        with self.output()[1].open('wb') as out_file:
            pickle.dump(int_to_item, out_file)
        with self.output()[2].open('wb') as out_file:
            pickle.dump(user_to_int, out_file)
        with self.output()[3].open('wb') as out_file:
            pickle.dump(int_to_user, out_file)

    def output(self):
        files = [S3Target('s3://{}/item2int.pkl'.format(AwsConfig().item2vec_path), client=self.client),
                 S3Target('s3://{}/int2item.pkl'.format(AwsConfig().item2vec_path), client=self.client),
                 S3Target('s3://{}/user2int.pkl'.format(AwsConfig().item2vec_path), client=self.client),
                 S3Target('s3://{}/int2user.pkl'.format(AwsConfig().item2vec_path), client=self.client),]
        return files
When I run this task I get the error ValueError: Unsupported open mode 'wb'. The items I try to dump into a pickle file are just python dictionaries.
Full traceback:
Traceback (most recent call last):
File "C:\Anaconda3\lib\site-packages\luigi\worker.py", line 203, in run
new_deps = self._run_get_new_deps()
File "C:\Anaconda3\lib\site-packages\luigi\worker.py", line 140, in _run_get_new_deps
task_gen = self.task.run()
File "C:\Users\user\Documents\python workspace\pipeline.py", line 60, in run
with self.output()[0].open('wb') as out_file:
File "C:\Anaconda3\lib\site-packages\luigi\contrib\s3.py", line 714, in open
raise ValueError("Unsupported open mode '%s'" % mode)
ValueError: Unsupported open mode 'wb'
This is an issue that only happens on Python 3.x, as explained here. To write a binary file or target on Python 3 (i.e. using 'wb' mode), just set the format parameter of S3Target to luigi.format.Nop, like this:
S3Target('s3://path/to/file', client=self.client, format=luigi.format.Nop)
Note that this is just a workaround; it is neither intuitive nor well documented.
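Applied to the task from the question, the output() method would look roughly like this (a sketch; the paths and the AwsConfig helper are taken from the question as-is):

def output(self):
    # luigi.format.Nop disables the text-mode wrapping, so these targets can be
    # opened with 'wb' and written with pickle.dump on Python 3.
    return [S3Target('s3://{}/item2int.pkl'.format(AwsConfig().item2vec_path), client=self.client, format=luigi.format.Nop),
            S3Target('s3://{}/int2item.pkl'.format(AwsConfig().item2vec_path), client=self.client, format=luigi.format.Nop),
            S3Target('s3://{}/user2int.pkl'.format(AwsConfig().item2vec_path), client=self.client, format=luigi.format.Nop),
            S3Target('s3://{}/int2user.pkl'.format(AwsConfig().item2vec_path), client=self.client, format=luigi.format.Nop)]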

python3.5 paramiko failed with hostname

Please help me to correct my script. My script checks the SSH login of a host and displays success/failure based on the result. It fails when I give a wrong hostname.
The code is as below:
[root@test1 script]# cat param.py
import multiprocessing
import paramiko
import random
import threading
import time

host_name = "test2"
print ("Checking hostname :"+str(host_name))
file = open('output_file','a')

ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
    ssh.connect(host_name, username='root', password='test')
    print ("success")
    file.write("success:"+str(host_name+"\n"))
except paramiko.SSHException:
    print ("Connection Failed")
    file.write("failed:"+str(host_name+"\n"))
    quit()

stdin,stdout,stderr = ssh.exec_command("hostname&&uptime")
for line in stdout.readlines():
    print (line.strip())
ssh.close()
It works fine when the correct username/password are given:
[root@test1 script]# python3 param.py
Checking hostname :test2
success
test2
12:31:49 up 83 days, 2:56, 2 users, load average: 0.00, 0.01, 0.05
It also works as expected when a wrong password is given. I changed the password in the script to a wrong one, and it reports the connection failure as expected.
[root@test1 script]# python3 param.py
Checking hostname :test2
Connection Failed
Now to my question: when I change the hostname to something that does not exist, paramiko fails and a lot of errors pop up.
[root@test1 script]# python3 param.py
Checking hostname :invalidtest2
Traceback (most recent call last):
File "param.py", line 16, in <module>
ssh.connect(host_name, username='root', password='test')
File "/root/usr/local/lib/python3.5/site-packages/paramiko/client.py", line 301, in connect
to_try = list(self._families_and_addresses(hostname, port))
File "/root/usr/local/lib/python3.5/site-packages/paramiko/client.py", line 199, in _families_and_addresses
hostname, port, socket.AF_UNSPEC, socket.SOCK_STREAM)
File "/root/usr/local/lib/python3.5/socket.py", line 728, in getaddrinfo
for res in _socket.getaddrinfo(host, port, family, type, proto, flags):
socket.gaierror: [Errno -2] Name or service not known
How can I get a message that the connection failed? I'm using Python 3.5.
It is not a good idea to keep passwords stored in the script itself. I have modified the script so that it takes the hostnames from a file and writes the output to different files.
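For the immediate question, the relevant part is the exception handling: a non-existent hostname raises socket.gaierror during name resolution rather than paramiko.SSHException, so it has to be caught separately. A minimal sketch applied to the original script (the full reworked version follows below):

import socket
import paramiko

host_name = "invalidtest2"   # placeholder hostname
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
    ssh.connect(host_name, username='root', password='test')
    print("success")
except paramiko.SSHException:
    # wrong credentials, key problems, etc.
    print("Connection Failed")
except socket.gaierror:
    # hostname could not be resolved (wrong hostname or DNS problem)
    print("Connection Failed: unknown host")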
#!/bin/python3
import threading, time, paramiko, socket, getpass
from queue import Queue

start_time1 = time.time()
locke1 = threading.Lock()
q = Queue()

#Check the login
def check_hostname(host_name, pw_r):
    with locke1:
        print ("Checking hostname :"+str(host_name)+" with " + threading.current_thread().name)
    file_output = open('output_file','a')
    file_success = open('success_file','a')
    file_failed = open('failed_file','a')
    file_error = open('error_file','a')
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    try:
        ssh.connect(host_name, username='root', password=pw_r)
        #print ("Success")
        file_success.write(str(host_name+"\n"))
        file_success.close()
        file_output.write("success: "+str(host_name+"\n"))
        file_output.close()
        # printing output if required from remote machine
        #stdin,stdout,stderr = ssh.exec_command("hostname&&uptime")
        #for line in stdout.readlines():
        #    print (line.strip())
    except paramiko.SSHException:
        # print ("error")
        file_failed.write(str(host_name+"\n"))
        file_failed.close()
        file_output.write("failed: "+str(host_name+"\n"))
        file_output.close()
        #quit()
    except paramiko.ssh_exception.NoValidConnectionsError:
        #print ("might be windows------------")
        file_output.write("failed: " + str(host_name + "\n"))
        file_output.close()
        file_failed.write(str(host_name+"\n"))
        file_failed.close()
        #quit()
    except socket.gaierror:
        #print ("wrong hostname/dns************")
        file_output.write("error: "+str(host_name+"\n"))
        file_output.close()
        file_error.write(str(host_name + "\n"))
        file_error.close()
    ssh.close()

def performer1():
    while True:
        hostname_value = q.get()
        check_hostname(hostname_value,pw_sent)
        q.task_done()

if __name__ == '__main__':
    print ("This script checks all the hostnames in the input_file with your standard password and write the outputs in below files: \n1.file_output\n2.file_success \n3.file_failed \n4.file_error \n")

    f = open('output_file', 'w')
    f.write("-------Output of all hosts-------\n")
    f.close()
    f = open('success_file', 'w')
    f.write("-------Success hosts-------\n")
    f.close()
    f = open('failed_file', 'w')
    f.write("-------Failed hosts-------\n")
    f.close()
    f = open('error_file', 'w')
    f.write("-------Hosts with error-------\n")
    f.close()

    with open("input_file") as f:
        hostname1 = f.read().splitlines()

    #Read the standard password from the user
    pw_sent = getpass.getpass("Enter the Password:")

    for i in hostname1:
        q.put(i)
    #print ("all the hostname : "+str(list(q.queue)))

    for no_of_threads in range(3):
        t = threading.Thread(target=performer1)
        t.daemon = True
        t.start()

    q.join()
    print ("Check output files for results")
    print ("completed task in" + str(time.time()-start_time1) + "seconds")

Socket error while connecting to hive through: hiveserver2-client.py

Has anyone experienced this error before when trying to connect to Hive?
Sample code used (https://github.com/telefonicaid/fiware-cygnus/blob/master/cygnus-ngsi/resources/hiveclients/python/hiveserver2-client.py):
import sys
import pyhs2
from pyhs2.error import Pyhs2Exception

# get the input parameters
if len(sys.argv) != 6:
    print 'Usage: python hiveserver2-client.py <hive_host> <hive_port> <db_name> <hadoop_user> <hadoop_password>'
    sys.exit()

hiveHost = sys.argv[1]
hivePort = sys.argv[2]
dbName = sys.argv[3]
hadoopUser = sys.argv[4]
hadoopPassword = sys.argv[5]

# do the connection
with pyhs2.connect(host=hiveHost,
                   port=hivePort,
                   authMechanism="PLAIN",
                   user=hadoopUser,
                   password=hadoopPassword,
                   database=dbName) as conn:
    # get a client
    with conn.cursor() as client:
        # create a loop attending HiveQL queries
        while (1):
            query = raw_input('remotehive> ')
            try:
                if not query:
                    continue
                if query == 'exit':
                    sys.exit()
                # execute the query
                client.execute(query)
                # get the content
                for row in client.fetch():
                    print row
            except Pyhs2Exception, ex:
                print ex.errorMessage
Error displayed:
[centos@test]$ sudo python hiveserver2-client.py computing.cosmos.lab.fiware.org 10000 default USERNAME TOKEN
Traceback (most recent call last):
File "hiveserver2-client.py", line 42, in <module>
database=dbName) as conn:
File "/usr/lib/python2.7/site-packages/pyhs2/__init__.py", line 7, in connect
return Connection(*args, **kwargs)
File "/usr/lib/python2.7/site-packages/pyhs2/connections.py", line 46, in __init__
transport.open()
File "/usr/lib/python2.7/site-packages/pyhs2/cloudera/thrift_sasl.py", line 74, in open
status, payload = self._recv_sasl_message()
File "/usr/lib/python2.7/site-packages/pyhs2/cloudera/thrift_sasl.py", line 92, in _recv_sasl_message
header = self._trans.readAll(5)
File "/usr/lib/python2.7/site-packages/thrift/transport/TTransport.py", line 60, in readAll
chunk = self.read(sz - have)
File "/usr/lib/python2.7/site-packages/thrift/transport/TSocket.py", line 132, in read
message='TSocket read 0 bytes')
thrift.transport.TTransport.TTransportException: TSocket read 0 bytes
Can you post your piece of code? This looks like the auth mechanism or the credentials being sent are not valid.
authMechanism can be "PLAIN" or "KERBEROS", depending on your setup.
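For reference, a minimal sketch of the connection call from the question with the auth mechanism made explicit (hostname, credentials and database are placeholders; whether "PLAIN" or "KERBEROS" is right depends on how the HiveServer2 instance is configured):

import pyhs2

conn = pyhs2.connect(host='computing.cosmos.lab.fiware.org',  # placeholder host
                     port=10000,             # an int; sys.argv values are strings, so cast with int() if needed
                     authMechanism="PLAIN",  # or "KERBEROS", per the server setup
                     user='USERNAME',
                     password='TOKEN',
                     database='default')
cur = conn.cursor()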