#!/bin/bash
#Oracle DB Info for NEXT
HOST="1.2.3.4"
PORT="5678"
SERVICE="MYDB"
DB_USER=$(whoami)
DB_PASS=$(base64 -d ~/.passwd)
DB_SCHEMA="my_db"
#Section for all of our functions.
function SQLConnection(){
sqlplus "$DB_USER"/"$DB_PASS"#"$HOST":"$PORT"/"$SERVICE"
}
function Connected(){
SQLConnection <<EOF
select sys_context('USERENV','SERVER_HOST') from dual;
EOF
}
function GetJMS(){
SQLConnection <<EOF
set echo on timing on lines 200 pages 100
select pd.destination from ${DB_SCHEMA}.pd_notification pd where pd.org_id = '$ORGID';
EOF
}
TODAY=$(date +"%A %B %d, %Y")
read -r -p $'\n\nWhat is the ORG ID? ' ORGID
read -r -p $'\n\nWhat is the REMOTE QUEUE MANAGER NAME? ' RQM
read -r -p $'\n\nWhat is the IP address of the REMOTE QUEUE MANAGER? ' CONN
read -r -p $'\n\nWhat is the PORT of the REMOTE QUEUE MANAGER? ' PORT
echo -en "* $(whoami)\n* $TODAY\n* MQ Setup $ORGID\n\nDEFINE +\n\tCHANNEL('$RQM.LQML') +\n\tCHLTYPE(SDR) +\n\tCONNAME('$CONN($PORT)') +\n\tXMITQ('BUF.2.$ORGID.XMQ')\n\tCHAUTH(TLS_RSA_WITH_AES_256_CBC_SHA256)\n\nDEFINE +\n\tCHANNEL('LQML.$RQM') +\n\tCHLTYPE(RCVR) +\n\tTRPTYPE(TCP)\n\nDEFINE +\n\tQLOCAL('$RQM') +\n\tTRIGDATA('LQML.$RQM') +\n\tINITQ('SYSTEM.CHANNEL.INITQ') +\n\tTRIGGER USAGE(XMITQ)\n\n" > ~/mqsetup.mqsc
CONNECTED=$(Connected | awk 'NR==16')
echo -en "\n\nHello From: $CONNECTED\n\n"
for JMSDESTINATION in $(GetJMS | awk 'NR>=16&&NR<=24{print $1}')
do
read -r -p $'\n\nWhich REMOTE QUEUE NAME matches with this ${JMSDESTINATION}?' RNAME
QDESC=$(echo "$JMSDESTINATION" | tr '.' ' ' | tr '[[:upper:]]' '[[:lower:]]')
echo -en "\n\nDEFINE +\n\tQR($JMSDESTINATION) +\n\t\tREPLACE DESCR('$ORGID $QDESC Queue') +\n\t\tREPLACE MAXDEPTH(5000) +\n\t\tXMITQ('BUF.2.$ORGID.XMQ') +\n\t\tRNAME('$RNAME') +\n\t\tRQMNAME('$RQM')" >> ~/mqsetup.mqsc
done
Here is the script I've built, hoping to automate the setup of IBM MQ Queues and Channels. My problem is that outside this script, I can establish an SQL Session without an issue, directly from the shell, provided I input the variables seen in the script. I can call the functions and everything returns just as I'd hope it would. When I run the exact same things from within the script, I get timeout errors ... the "Hello From" is blank, which tells me there is no DB connection.
I'm totally stumped as to why it all works great from outside the script, but inside it times out.
I appreciate the eyes and the help!
You're overwriting a variable value. You have this at the top of the script:
PORT="5678"
but then later on you do:
read -r -p $'\n\nWhat is the PORT of the REMOTE QUEUE MANAGER? ' PORT
which overwrites your 5678 value with whatever is entered there. Nothing may be listening on that port on the DB server at all, it may be serving something else entirely, or if you don't enter a value it'll default to port 1521 when you connect. Either way the connection is going to fail, quickly or slowly depending on the state of the port (slower, for example, if a firewall blocks it).
If you test the connection by adding a Connected call before the read calls (as I initially did) it seems to work fine; but the connections after the reads don't work, because the port value they try to connect to is now wrong.
Use a different name for the two variables, e.g. RQ_PORT for the second one - both in its read command and the subsequent creation of the ~/mqsetup.mqsc file.
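For example (RQ_PORT is just an illustrative name; anything that doesn't collide with the DB PORT will do):
read -r -p $'\n\nWhat is the PORT of the REMOTE QUEUE MANAGER? ' RQ_PORT
and then the echo that builds ~/mqsetup.mqsc would use CONNAME('$CONN($RQ_PORT)') instead of CONNAME('$CONN($PORT)').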
You may also find it useful to add the -l flag to your SQL*Plus call so that if the connection fails for some reason it won't re-prompt for credentials, which in some circumstances can make the script appear to hang until you hit enter a few times.
Not directly relevant to the problem, but when automating anything like this I usually also use the -s flag to suppress the banners (which can vary between environments); and if you're only interested in capturing query output I'd usually set headings and/or pagination off, and feedback off, and generally set SQL*Plus up to generate as little noise as possible - it makes parsing out the interesting bits easier.
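As a rough, untested sketch of what those suggestions could look like applied to your functions (the set options shown are just the usual ones; note that with headings and pagination off, the awk 'NR==16'-style line picking later in the script would need adjusting too):
function SQLConnection(){
# -s suppresses the banners, -l gives up instead of re-prompting on a failed login
sqlplus -s -l "$DB_USER"/"$DB_PASS"@"$HOST":"$PORT"/"$SERVICE"
}
function GetJMS(){
SQLConnection <<EOF
set heading off feedback off pagesize 0 verify off
select pd.destination from ${DB_SCHEMA}.pd_notification pd where pd.org_id = '$ORGID';
EOF
}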
Given the following table
$ connmanctl services
*AO MyNetwork wifi_dc85de828967_68756773616d_managed_psk
OtherNET wifi_dc85de828967_38303944616e69656c73_managed_psk
AnotherOne wifi_dc85de828967_3257495245363836_managed_wep
FourthNetwork wifi_dc85de828967_4d7572706879_managed_wep
AnOpenNetwork wifi_dc85de828967_4d6568657272696e_managed_none
I'd like to be able to connect to a network, e.g. OtherNET, using the string OtherNET rather than the long wifi_dc85de828967_38303944616e69656c73_managed_psk, as I don't want to count the times I press Tab and/or check that the wifi_ line in the prompt corresponds to the intended network.
Is this possible with connman only? Or do I really have to write a wrapper myself?
The man page of connmanctl contains
services
Shows a list of all available services. This includes the
nearby wifi networks, the wired ethernet connections, blue‐
tooth devices, etc. An asterisk in front of the service
indicates that the service has been connected before.
and
connect service
Connects to the given service. Some services need a so-
called provisioning file in order to connect to them, see
connman-service.config(5).
neither of which says much about the format of the output or the use of the command.
Similarly, the Arch Linux wiki simply refers to the last column as the second field, the one beginning with wifi_.
Since nobody has answered yet, I have found some spare time to code the following wrapper, which basically does the following:
keeps reading commands as long as the input is not exit;
when an input starting with connect is provided, the word after connect is used to pattern-match one line (only the first matching line) of the connmanctl services output; the last field of that line (the one starting with wifi_) is passed on to connmanctl connect;
any other non-empty input is forwarded to connmanctl as it is;
a certain number of consecutive empty inputs (set through the variable NOINPUTS_BEFORE_EXIT, default 3) causes the script to exit.
The script is the following
#!/usr/bin/env bash
name=$(basename "$0")
noinputs_before_exit=${NOINPUTS_BEFORE_EXIT:=3}
while [[ $cmd != 'exit' ]]; do
    echo -n "$name " 1>&2
    read -r cmd
    if [[ -z "$cmd" ]]; then
        # exit after too many consecutive empty inputs
        (( --noinputs_before_exit == 0 )) && exit
    else
        noinputs_before_exit=$NOINPUTS_BEFORE_EXIT
        if [[ $cmd =~ ^connect[[:space:]] ]]; then
            # resolve the friendly name to the long wifi_... service name
            connmanctl connect $(connmanctl services | awk '/'"${cmd#* }"'/ { print $NF; exit }')
        else
            connmanctl $cmd
        fi
    fi
done
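A session might look roughly like this (assuming the script is saved as connman-wrapper, a name I'm making up here; connmanctl's own output is omitted). Typing connect OtherNET makes the wrapper run connmanctl connect wifi_dc85de828967_38303944616e69656c73_managed_psk behind the scenes:
$ ./connman-wrapper
connman-wrapper services
connman-wrapper connect OtherNET
connman-wrapper exit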
The script limitations are at least the following:
(less importantly, to me) I have no idea whether it meets any security requirements;
it does not allow Tab-completion;
(most importantly, to me) it does not use the readline library, so line editing is impossible.
I'm using screen to monitor several parallel jobs to test small variations of my program. I gave each screen session a different logfile. I do not remember which logfile I set for which session, and now wish I did!
Is there a way to query which session name (usually of the form #####.ttys000N.hostname) goes with which logfile, or vice-versa?
(To whom it concerns: the gnu-screen tag suggests determining which SX site the question is most relevant to. Based on the help pages of SuperUser and StackOverflow, this question appears roughly equally applicable to either community. Feel free to migrate it if you think it belongs elsewhere.)
I didn't find my suggestion from the comments (using screen -ls to list the process ids, and then running lsof -p on them to find the filenames) very satisfactory, so here is another, not entirely satisfactory, alternative:
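For the record, that first approach is roughly the following sketch (untested; it only helps while screen actually has the logfile open for writing):
#!/bin/bash
# list the PIDs of the screen sessions, then look at each one's open files
for pid in $(screen -ls | awk '/^[[:space:]]*[0-9]+\./ { split($1, a, "."); print a[1] }')
do
    echo "== $pid =="
    lsof -p "$pid" | awk '$4 ~ /^[0-9]+w$/ { print $NF }'   # files open for writing
done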
There is an option -X to send commands to a remote screen, but unfortunately any output is shown on the remote. There is an option -Q to send a command and print the result locally, but it only accepts a very limited set of commands. However, one of these is lastmsg, which repeats the last message displayed.
So you can use -X logfile to display the name of the logfile remotely, then immediately use -Q lastmsg to duplicate that display locally! There is, of course, the possibility of some event occurring in the middle of this non-atomic action. The two commands cannot be combined. Here's an example:
#!/bin/bash
screen -ls |
while read session rest
do if [[ "$session" =~ [0-9]+\..+ ]]
then screen -S "$session" -X logfile # shows in status
msg=$(screen -S "$session" -Q lastmsg)
# logfile is '/tmp/xxxxx'
echo "$session $msg"
fi
done
and some typical output:
21017.test2 logfile is '/tmp/xxxxx'
20166.test logfile is '/tmp/mylog.%n'
On the repository home page, I can see comments posted in the recent activity list at the bottom, but it only shows 10 comments.
I want to see all the comments posted since the beginning.
Is there any way?
Comments of pull requests, issues and commits can be retrieved using bitbucket’s REST API.
However it seems that there is no way to list all of them at one place, so the only way to get them would be to query the API for each PR, issue or commit of the repository.
Note that this takes a long time, since bitbucket has seemingly set a limit on the number of API accesses to repository data: I got Rate limit for this resource has been exceeded errors after retrieving around a thousand results, and after that I could only retrieve roughly one entry per second elapsed since the last rate limit error.
Finding the API URL to the repository
The first step is to find the URL to the repo. For private repositories, it is necessary to get authenticated by providing username and password (using curl’s -u switch). The URL is of the form:
https://api.bitbucket.org/2.0/repositories/{repoOwnerName}/{repoName}
Running git remote -v from the local git repository should provide the missing values. Check the forged URL (below referred to as $url) by verifying that repository information is correctly retrieved as JSON data from it: curl -u username $url.
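For example, with placeholder names throughout (take the owner and repository names from whatever git remote -v prints for your repo):
$ git remote -v
origin  https://someuser@bitbucket.org/repoOwnerName/repoName.git (fetch)
$ url=https://api.bitbucket.org/2.0/repositories/repoOwnerName/repoName
$ curl -u username "$url"        # should return the repository's details as JSON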
Fetching comments of commits
Comments of a commit can be accessed at $url/commit/{commitHash}/comments.
The resulting JSON data can be processed by a script. Beware that the results are paginated.
Below I simply extract the number of comments per commit. It is indicated by the value of the member size of the retrieved JSON object; I also request a partial response by adding the GET parameter fields=size.
My script getNComments.sh:
#!/bin/sh
pw=$1
id=$2
json=$(curl -s -u username:"$pw" \
https://api.bitbucket.org/2.0/repositories/{repoOwnerName}/{repoName}/commit/$id/comments'?fields=size')
printf '%s' "$json" | grep -q '"type": "error"' \
&& printf "ERROR $id\n" && exit 0
nComments=$(printf '%s' "$json" | grep -o '"size": [0-9]*' | cut -d' ' -f2)
: ${nComments:=EMPTY}
checkNumeric=$(printf '%s' "$nComments" | tr -dc 0-9)
[ "$nComments" != "$checkNumeric" ] \
&& printf >&2 "!ERROR! $id:\n%s\n" "$json" && exit 1
printf "$nComments $id\n"
To use it, taking into account the possibility for the error mentioned above:
A) Prepare the input data. From the local repository, generate the list of commits as wanted (run git fetch -a beforehand if the local git repo needs updating); check out git help rev-list for how it can be customised.
git rev-list --all | sort > sorted-all.id
cp sorted-all.id remaining.id
B) Run the script. Note that the password is passed here as a parameter, so first assign it to a variable safely (using stty -echo; IFS= read -r passwd; stty echo, all on one line); also see the security considerations below. The processing is parallelised onto 15 processes here, using the option -P.
< remaining.id xargs -P 15 -L 1 ./getNComments.sh "$passwd" > commits.temp
C) When the rate limit is reached, that is when getNComments.sh prints !ERROR!, kill the above command (Ctrl-C) and execute the commands below to update the input and output files. Wait a while for the request limit to increase, then re-execute the above command, and repeat until all the data is processed (that is, until wc -l remaining.id returns 0).
cat commits.temp >> commits.result
cut -d' ' -f2 commits.result | sort | comm -13 - sorted-all.id > remaining.id
D) Finally, you can get the commits which received comments with:
grep '^[1-9]' commits.result
Fetching comments of pull requests and issues
The procedure is the same as for fetching commits’ comments, but for the following two adjustments:
Edit the script to replace in the URL commit by pullrequests or by issues, as appropriate;
Let $n be the number of issues/PRs to search. The git rev-list command above becomes: seq 1 $n > sorted-all.id
The total number of PRs in the repository can be obtained with:
curl -su username $url/pullrequests'?state=&fields=size'
and, if the issue tracker is set up, the number of issues with:
curl -su username $url/issues'?fields=size'
Hopefully, the repository has few enough PRs and issues so that all data can be fetched in one go.
Viewing comments
They can be viewed normally via the web interface on their commit/PR/issue page at:
https://bitbucket.org/{repoOwnerName}/{repoName}/commits/{commitHash}
https://bitbucket.org/{repoOwnerName}/{repoName}/pull-requests/{prId}
https://bitbucket.org/{repoOwnerName}/{repoName}/issues/{issueId}
For example, to open all PRs with comments in firefox:
awk '/^[1-9]/{print "https://bitbucket.org/{repoOwnerName}/{repoName}/pull-requests/"$2}' PRs.result | xargs firefox
Security considerations
Arguments passed on the command line are visible to all users of the system, via ps ax (or /proc/$PID/cmdline). Therefore the bitbucket password will be exposed, which could be a concern if the system is shared by multiple users.
There are three commands getting the password from the command line: xargs, the script, and curl.
It appears that curl tries to hide the password by overwriting its memory, but it is not guaranteed to work, and even if it does, it leaves it visible for a (very short) time after the process starts. On my system, the parameters to curl are not hidden.
A better option could be to pass the sensitive information through environment variables. They should be visible only to the current user and root via ps axe (or /proc/$PID/environ); although it seems that there are systems that let all users access this information (do a ls -l /proc/*/environ to check the environment files’ permissions).
In the script simply replace the lines pw=$1 id=$2 with id=$1, then pass pw="$passwd" before xargs in the command line invocation. It will make the environment variable pw visible to xargs and all of its descendent processes, that is the script and its children (curl, grep, cut, etc), which may or may not read the variable. curl does not read the password from the environment, but if its password hiding trick mentioned above works then it might be good enough.
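With that change, the invocation from step B becomes something like:
pw="$passwd" < remaining.id xargs -P 15 -L 1 ./getNComments.sh > commits.temp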
There are ways to avoid passing the password to curl via the command line, notably via standard input using the option -K -. In the script, replace curl -s -u username:"$pw" with printf -- '-s\n-u "%s"\n' "$authinfo" | curl -K - and define the variable authinfo to contain the data in the format username:password. Note that this method needs printf to be a shell built-in to be safe (check with type printf), otherwise the password will show up in its process arguments. If it is not a built-in, try with print or echo instead.
A simple alternative to an environment variable that will not appear in ps output in any case is via a file. Create a file with read/write permissions restricted to the current user (chmod 600), and edit it so that it contains username:password as its first line. In the script, replace pw=$1 with IFS= read -r authinfo < "$1", and edit it to use curl’s -K option as in the paragraph above. In the command line invocation replace $passwd with the filename.
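Putting those two changes together, the top of the script would then look roughly like this (untested sketch; the rest of getNComments.sh is unchanged, and the first argument is now the path to the mode-600 credentials file):
#!/bin/sh
IFS= read -r authinfo < "$1"   # first line of the file: username:password
id=$2
json=$(printf -- '-s\n-u "%s"\n' "$authinfo" | curl -K - \
https://api.bitbucket.org/2.0/repositories/{repoOwnerName}/{repoName}/commit/$id/comments'?fields=size')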
The file approach has the drawback that the password will be written to disk (note that files in /proc are not on the disk). If this too is undesirable, it is possible to pass a named pipe instead of a regular file:
mkfifo pipe
chmod 600 pipe
# make sure printf is a builtin, or use an equivalent instead
(while :; do printf -- '%s\n' "username:$passwd"; done) > pipe&
pid=$!
exec 3<pipe
Then invoke the script passing pipe instead of the file. Finally, to clean up do:
kill $pid
exec 3<&-
This will ensure the authentication info is passed directly from the shell to the script (through the kernel), is not written to disk and is not exposed to other users via ps.
You can go to Commits and see the top line for each commit; you will need to click on each one to see further information.
If I find a way to see all without drilling into each commit, I will update this answer.
I have an expect script which I need to run every 3 minutes on my management node to collect tx/rx values for each port attached to a DCX Brocade SAN switch, using the portperfshow command.
Each time I try to use crontab to execute the script every 3 mins, the script does not work!
My expect script starts with #!/usr/bin/expect -f and I am calling the script using the following syntax under cron:
3 * * * * /usr/bin/expect -f /root/portsperfDCX1/collect-all.exp sanswitchhostname
However, when I execute the script (not under cron) it works as expected:
root# ./collect-all.exp sanswitchhostname
works just fine.
Please Please can someone help! Thanks.
The script collect-all.exp is:
#!/usr/bin/expect -f
#Time and Date
set day [timestamp -format %d%m%y]
set time [timestamp -format %H%M]
#logging
set LogDir1 "/FPerf/PortsLogs"
set timeout 5
set ipaddr [lrange $argv 0 0]
set passw "XXXXXXX"
if { $ipaddr == "" } {
puts "Usage: <script.exp> <ip address>\n"
exit 1
}
spawn ssh admin@$ipaddr
expect -re "password"
send "$passw\r"
expect -re "admin"
log_file "$LogDir1/$day-portsperfshow-$time"
send "portperfshow -tx -rx -t 10\r"
expect timeout "\n"
send \003
log_file
send -- "exit\r"
close
I had the same issue, except that my script was ending with
interact
Finally I got it working by replacing it with these two lines:
expect eof
exit
Changing interact to expect eof worked for me!
I needed to remove the exit part, because I had more statements in the bash script after the expect line (I was calling expect inside a bash script).
There are two key differences between a program that is run normally from a shell and a program that is run from cron:
Cron does not populate (many) environment variables. Notably absent are TERM, SHELL and HOME, but that's just a small proportion of the long list that will not be defined.
Cron does not set up a current terminal, so /dev/tty doesn't resolve to anything. (Note, programs spawned by Expect will have a current terminal.)
With high probability, any difficulties will come from these, especially the first. To fix, you need to save all your environment variables in an interactive session and use these in your expect script to repopulate the environment. The easiest way is to use this little expect script:
unset -nocomplain ::env(SSH_AUTH_SOCK) ;# This one is session-bound anyway
puts [list array set ::env [array get ::env]]
That will write out a single very long line which you want to put near the top of your script (or at least before the first spawn). Then see if that works.
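For example, one way to capture that line (dropping SSH_AUTH_SOCK at the same time) is to run the snippet from your interactive shell and redirect it to a scratch file; env-snapshot.tcl below is just a filename I'm making up:
expect -c 'unset -nocomplain ::env(SSH_AUTH_SOCK); puts [list array set ::env [array get ::env]]; exit' > env-snapshot.tcl
Then paste the contents of env-snapshot.tcl near the top of collect-all.exp.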
Jobs run by cron are not considered login shells, and thus don't source your .bashrc, .bash_profile, etc.
If you want that behavior, you need to add it explicitly to the crontab entry like so:
$ crontab -l
0 13 * * * bash -c '. .bash_profile; etc ...'
$
I currently have the following lines of code in a script:
set -A ARRAY OPTION1 OPTION2 OPTION3 OPTION4
set -A matches
for OPTION in ${ARRAY[@]}; do
DIFF=$(ssh $USER@$host " diff $PERSONALCONF $PRESETS$OPTION" )
if [[ $DIFF == "" ]]; then
set -A matches "${matches[@]}" $OPTION
fi
done
Basically, I have a loop that goes through each element in a pre-defined array, connects to a remote server (same server each time), and then compares a file with a file as defined by the loop using the diff command. Basically, it compares a personal.conf file with personal.conf.option1, personal.conf.option2, etc. If there is no difference, it adds it to the array. If there is a difference, nothing happens.
I was wondering if its possible to execute this or get the same result (storing the matching files in an array ON THE HOST MACHINE, not the server that's being connected to) by way of only connecting once via SSH. I cannot store anything on the remote server, nor can I execute a remote script on that server. I can only issue commands via ssh (kind of a goofy setup). Currently, it connects as many times as there are options. This seems inefficient. If anyone has a better solution I'd love to hear it.
Several options:
You can use OpenSSH's connection multiplexing feature (see ControlMaster and ControlPath in ssh(1) and ssh_config(5)).
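A minimal, untested sketch of what that could look like with the variables from the question (the control socket path is just an example):
# open one master connection in the background; later ssh calls reuse it
ssh -M -S "$HOME/.ssh/cm-%r@%h:%p" -o ControlPersist=10m -Nf "$USER@$host"
set -A matches
for OPTION in ${ARRAY[@]}; do
    DIFF=$(ssh -S "$HOME/.ssh/cm-%r@%h:%p" "$USER@$host" "diff $PERSONALCONF $PRESETS$OPTION")
    [[ -z $DIFF ]] && set -A matches "${matches[@]}" "$OPTION"
done
# tear the master connection down when finished
ssh -S "$HOME/.ssh/cm-%r@%h:%p" -O exit "$USER@$host"
Each loop iteration still runs a separate ssh process, but they all ride over the single authenticated connection, so you only log in once.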
Also, most shells will gladly accept a script to run over stdin, so you could just run something like
cat script.sh | ssh $HOST /bin/sh
Most scripting languages (Perl, Python, Ruby, etc.) have some SSH module that allows connection reuse:
#!/usr/bin/perl
use Net::OpenSSH;
my ($user, $host) = (...);
my @options = (...);
my @matches;
my $ssh = Net::OpenSSH->new("$user\@$host");
for my $option (@options) {
my $diff = $ssh->capture("diff $personal_conf $presets$option");
if ($ssh->error) {
warn "command failed: " . $ssh->error;
}
else {
push @matches, $option if $diff eq '';
}
}
print "@matches\n";