Tcl: child process exited abnormally

I'm using the code here to run some processes in parallel.
https://wiki.tcl-lang.org/page/Execute+in+Parallel+and+Wait
Here is the code that I've modified.
foreach scenario $scenario_list {
    set script {
        set sname $scenario
        puts "Scenario: $sname"
        set sdir "$curr_dir/$sname"
        puts "Results from: $sdir"
        extract_system_kpis $sname "SUCCESS" $sdir $hw_instance_list $hbm_scheduler_pairs
    }
    set chan [open |[list [info nameofexecutable] <<$script 2>@stderr]]
    dict set res $chan command $script
    fconfigure $chan -blocking 0
    lappend background $chan
}
while 1 {
    foreach chan $background {
        if {[eof $chan]} {
            fconfigure $chan -blocking 1
            if {[set idx [lsearch -exact $background $chan]] >= 0} {
                set background [lreplace $background $idx $idx]
            }
            catch [close $chan] cres copts
            dict set res $chan result $cres
            dict set res $chan options $copts
        } else {
            puts -nonewline [read $chan]
        }
    }
    if {[llength $background] == 0} {
        break
    }
    after 100
}
return $res
I'm getting the error
child process exited abnormally
while executing
"close $chan"
Any ideas? I'm running Tcl 8.6.
Thanks
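
For what it's worth, "child process exited abnormally" is the generic message Tcl's close gives when a pipeline child finishes with a non-zero exit status. The actual status is recorded in the options dictionary under -errorcode as a list of the form CHILDSTATUS pid code. A minimal sketch of inspecting it, reusing the $chan variable from the loop above (illustrative, not a drop-in fix):

if {[catch {close $chan} cres copts]} {
    # for a non-zero child exit, -errorcode looks like: CHILDSTATUS <pid> <exitStatus>
    set ecode [dict get $copts -errorcode]
    if {[lindex $ecode 0] eq "CHILDSTATUS"} {
        puts "child [lindex $ecode 1] exited with status [lindex $ecode 2]"
    }
}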

Related

AWK Output Produces Function Not Defined

I inherited this code, so I'm hoping someone can help me with the error message.
Here's the AWK file. It reads a CSV file and is supposed to produce a formatted list.
The error returned is: FNR=1 fatal: function `header' not defined.
This must have worked at some point in time. Not sure how long it's been broken and I just learned about it a couple of days ago.
Can anyone help?
CSV File
"POSN","STATUS","TITLE","BEGIN_DATE","END_DATE","ROLL","PIDM","A_NUMBER","FIRST_NAME","LAST_NAME","EGRP"
"C99999","A","Title","01-JUL-95","","C",888888,"A00888888","John","Doe1","22"
"C99999","A","Ttile","01-JUL-95","","C",9999999,"A09999999","John","Doe2","23"
"C11111","A","Title","01-JUL-95","","C",0000001,"A00000001","John","Doe3","01"
$PROG_LC.awk
# fieldname len
# 1 posn 6
# 2 status 1
# 3 title 30
# 4 begin_date 10
# 5 end_date 10
# 6 roll 1
# 7 a_number 8
# 8 a_number 9
# 9 first_name 15
# 10 last_name 30
# 11 egrp 4
BEGIN { pagelen = 20; pagewidth = 126
lenheader = 4; lendetail = 1; lenfooter = 2 }
header() {
print trititle("XXXXXXX", "Report",
sprintf("Page %d", pagenum))
print ""
print " Posn S Title Begin Date End Date " \
"R A-Number First Name Last Name Egrp"
print "------ - ------------------------------ ---------- ---------- " \
"- -------- --------- --------------- ------------------------------ ----" }
detail(X) {
printf "%-6.6s %-1.1s %-30.30s %-10.10s %-10.10s %-1.1s %-8.8s %-9.9s " \
"%-15.15s %-30.30s %-4.4s\n", X[1], X[2], X[3], X[4], X[5],
X[6], X[7], X[8], X[9], X[10], X[11] }
footer() { print ""; print trititle(user "@" sid, one_up, today) }
Shell Script
#!/bin/sh
. $BANNER_HOME/local/exe/local_init.sh
H=/home/jobsub/${ORACLE_SID}_LOGS
PROG_LC=`echo $PROG | tr "[A-Z]" "[a-z]"`
PROG_UC=`echo $PROG | tr "[a-z]" "[A-Z]"`
CSV=$H/$(basename $PROG_LC .shl)_${ONE_UP}.csv
LOG=$H/$(basename $PROG_LC .shl)_${ONE_UP}.log
WHOAMI=$(whoami)
echo "BANUID = $BANUID" >> $LOG
echo "ONE_UP = $ONE_UP" >> $LOG
echo "PROG = $PROG" >> $LOG
echo "PRNT = $PRNT" >> $LOG
echo "ORACLE_SID = $ORACLE_SID" >> $LOG
echo "H = $H" >> $LOG
echo "PROG_LC = $PROG_LC" >> $LOG
echo "PROG_UC = $PROG_UC" >> $LOG
echo "CSV = $CSV" >> $LOG
echo "LOG = $LOG" >> $LOG
echo "WHOAMI = $WHOAMI" >> $LOG
echo "LOCAL_EXE = $LOCAL_EXE" >> $LOG
sqlplus -s $BAN9UID/@${TARGETDB} <<EOF
variable status number
begin :status := storeprocs.write_csv_file('$PROG_LC', $ONE_UP);
end;
/
exit :status
EOF
STATUS="$?"
echo "RETURN CODE = $STATUS" >> $LOG
if [ $STATUS -eq 0 ]
then echo "$PROG_UC completed successfully" >> $LOG
else echo "$PROG_UC completed with failure" >> $LOG
fi
if [ -f $LOCAL_EXE/$PROG_LC.awk ]
then LIS=$H/$(basename $PROG_LC .shl)_${ONE_UP}.lis
LC_NUMERIC=en_US.utf8 gawk -f $LOCAL_EXE/csvtolis.awk \
-f $LOCAL_EXE/$PROG_LC.awk $CSV > $LIS
gurinso -n $ONE_UP -l $LIS -j $PROG -w $BANUID $BAN9UID/@${TARGETDB}
fi
exit $STATUS
csvtolis.awk
BEGIN { linenum = 0
pagenum = 0
user = toupper(ENVIRON["BAN9UID"])
sid = ENVIRON["ORACLE_SID"]
oneup = ENVIRON["ONE_UP"]
"date +%m/%d/%Y" | getline today }
function csvsplit(str, arr, i,j,n,s,fs,qt) {
# split comma-separated fields into arr; return number of fields in arr
# fields surrounded by double-quotes may contain commas;
# doubled double-quotes represent a single embedded quote
delete arr; s = "START"; n = 0; fs = ","; qt = "\""
for (i = 1; i <= length(str); i++) {
if (s == "START") {
if (substr(str,i,1) == fs) { arr[++n] = "" }
else if (substr(str,i,1) == qt) { j = i+1; s = "INQUOTES" }
else { j = i; s = "INFIELD" } }
else if (s == "INFIELD") {
if (substr(str,i,1) == fs) {
arr[++n] = substr(str,j,i-j); j = 0; s = "START" } }
else if (s == "INQUOTES") {
if (substr(str,i,1) == qt) { s = "MAYBEDOUBLE" } }
else if (s == "MAYBEDOUBLE") {
if (substr(str,i,1) == fs) {
arr[++n] = substr(str,j,i-j-1)
gsub(qt qt, qt, arr[n]); j = 0; s = "START" } } }
if (s == "INFIELD" || s == "INQUOTES") { arr[++n] = substr(str,j) }
else if (s == "MAYBEDOUBLE") {
arr[++n] = substr(str,j,length(str)-j); gsub(qt qt, qt, arr[n]) }
else if (s == "START") { arr[++n] = "" }
return n }
function trititle(left, center, right, gap1, gap2) { # assume sufficient space
gap1 = int((pagewidth - length(center)) / 2) - length(left)
gap2 = pagewidth - length(left) - length(center) - length(right) - gap1
return left sprintf("%*s", gap1, "") center sprintf("%*s", gap2, "") right }
NR > 1 { nfields = csvsplit($0, csv); # print one record, with header/footer as needed
if (pagelen - (linenum % pagelen) - lenfooter < lendetail) {
while ((linenum + lenfooter) % pagelen != 0) { print ""; linenum++ }
footer(); linenum += lenfooter }
if (linenum % pagelen == 0) { pagenum++; header(); linenum += lenheader }
detail(csv); linenum += lendetail
if ((linenum + lenfooter) % pagelen == 0) { footer(); linenum += lenfooter } }
END { if (linenum % pagelen != 0) { # if not at top of page
while ((linenum + lenfooter) % pagelen != 0) { # while not at bottom
print ""; linenum++ } # skip to bottom
footer() } } # and print footer
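
As a side note on the error message itself: gawk only treats a block introduced by the function keyword as a user-defined function definition. A bare header() { ... } at the top level is instead parsed as a pattern that calls header(), so when no such function exists gawk aborts on the first record with "function `header' not defined". A minimal sketch of the declaration form gawk expects (body shortened for illustration):

function header() {
    print trititle("XXXXXXX", "Report", sprintf("Page %d", pagenum))
    print ""
}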

Efficient Script to get all Extended File Properties

I'm pretty new to working with PowerShell and I have some working code, but I'm not sure how to turn it into an efficient routine that returns all of the extended file properties of some video files I have.
I have:
# The basic setup for the next steps
$path = 'C:\test\videotocheck.mp4'
$shell = New-Object -COMObject Shell.Application
$folder = Split-Path $path
$file = Split-Path $path -Leaf
$shellfolder = $shell.Namespace($folder)
$shellfile = $shellfolder.ParseName($file)
# This command gets a list of all the extended attributes available for this file
0..500 | Foreach-Object { '{0} = {1}' -f $_, $shellfolder.GetDetailsOf($null, $_) }
# These commands get the individual attributes picked out of the list above
$shellfolder.GetDetailsOf($shellfile, 314)
$shellfolder.GetDetailsOf($shellfile, 316)
All I want to do is provide a filename and get back a list of all of the attributes and their values (if they have one).
Example:
I intend to use this in a SQL stored procedure. I can work with different types of output if that's easier.
I'm mostly interested in the dimensions.
Any guidance would be appreciated.
To get all of this extended metadata you could use the function below.
You can give it the path to a single file, or the path to a folder where the files are.
function Get-MetaData {
    [CmdletBinding()]
    [OutputType([Psobject[]])]
    Param (
        # Path can be the path to a folder or the full path and filename of a single file
        [Parameter(Mandatory = $true, ValueFromPipeline = $true, Position = 0)]
        [string]$Path,
        # Pattern is unused if Path is pointing to a single file
        [Alias('Filter')]
        [string]$Pattern = '*.*',
        [Alias('Indices')]
        [int[]]$Properties = 1..500,
        # Recurse is unused if Path is pointing to a single file
        [switch]$Recurse,
        [switch]$IncludeEmptyProperties
    )
    $item = Get-Item -Path $Path -ErrorAction SilentlyContinue
    if (!$item) { Write-Error "$Path could not be found."; return }
    if (!$item.PSIsContainer) {
        # it's a file
        $files = @($item)
        $Path = $item.DirectoryName
    }
    else {
        # it's a folder
        $files = Get-ChildItem -Path $Path -Filter $Pattern -File -Recurse:$Recurse
    }
    $shell = New-Object -ComObject "Shell.Application"
    $objDir = $shell.NameSpace($Path)
    foreach ($file in $files) {
        $objFile = $objDir.ParseName($file.Name)
        $mediaFile = $objDir.Items()
        foreach ($index in $Properties) {
            $name = $objDir.GetDetailsOf($mediaFile, $index)
            if (![string]::IsNullOrWhiteSpace($name)) {
                $value = $objDir.GetDetailsOf($objFile, $index)
                if (![string]::IsNullOrWhiteSpace($value) -or $IncludeEmptyProperties) {
                    [PsCustomObject]@{
                        Path     = $file.FullName
                        Index    = $index
                        Property = $name
                        Value    = $value
                    }
                }
            }
        }
    }
    # clean-up Com objects
    $null = [System.Runtime.Interopservices.Marshal]::ReleaseComObject($objFile)
    $null = [System.Runtime.Interopservices.Marshal]::ReleaseComObject($objDir)
    $null = [System.Runtime.Interopservices.Marshal]::ReleaseComObject($shell)
    [System.GC]::Collect()
    [System.GC]::WaitForPendingFinalizers()
}
You can of course play around with the different parameters the function takes, like -Pattern '*.mp4' to only list properties for mp4 files, or add the switch -IncludeEmptyProperties to also list properties that exist for that file type but have no value for the specified file.
With the Properties parameter you can give the function an array of int32 property indices to return. If you leave it out, the function tries to get all properties from index 1 to index 500 (where available).
Most of the interesting property indices are:
audio/video files: 0,1,2,3,4,5,9,11,12,13,14,15,16,17,18,19,20,21,22,26,27,28,36,164,165,194,213,220,223,237,243
font files: 0,1,2,3,4,5,20,21,25,33,34,164,165,166,196,310
image files: 0,1,2,3,4,5,9,11,31,164,165,174,175,176,177,178,194,196
Use it like this:
$result = Get-MetaData -Path '<pathToTheFile_OR_pathToTheFolder>'
# output to GridView
$result | Out-GridView
# output to CSV file
$result | Export-Csv -Path '<pathToTheOutput.csv>' -NoTypeInformation
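
For the use case in the question (video dimensions), something along these lines could work, reusing the indices 314 and 316 the question already identified - note that those index numbers vary between Windows versions, so treat them as an assumption:

# only .mp4 files, only the two indices the question found for its system
Get-MetaData -Path 'C:\test' -Pattern '*.mp4' -Properties 314, 316 | Out-GridView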
Here's a simplified version similar to Theo's. It will accept file or folder paths, or objects, via the pipeline or as a parameter.
Function Get-FileMetaData {
    [cmdletbinding()]
    Param
    (
        [parameter(valuefrompipeline, ValueFromPipelineByPropertyName, Position = 1, Mandatory)]
        $InputObject
    )
    begin
    {
        $shell = New-Object -ComObject Shell.Application
    }
    process
    {
        foreach ($object in $InputObject)
        {
            if ($object -is [string])
            {
                try
                {
                    $object = Get-Item $object -ErrorAction Stop
                }
                catch
                {
                    Write-Warning "Error while processing $object : $($_.exception.message)"
                    break
                }
            }
            try
            {
                Test-Path $object -ErrorAction Stop
            }
            catch
            {
                Write-Warning "Error while processing $($object.fullname) : $($_.exception.message)"
                break
            }
            switch ($object)
            {
                {$_ -is [System.IO.DirectoryInfo]} {
                    write-host Processing folder $object.FullName -ForegroundColor Cyan
                    $currentfolder = $shell.namespace($object.FullName)
                    $items = $currentfolder.items()
                }
                {$_ -is [System.IO.FileInfo]} {
                    write-host Processing file $object.FullName -ForegroundColor Cyan
                    $parent = Split-Path $object
                    $currentfolder = $shell.namespace($parent)
                    $items = $currentfolder.ParseName((Split-Path $object -Leaf))
                }
            }
            try
            {
                foreach ($item in $items)
                {
                    0..512 | ForEach-Object -Begin {$ht = [ordered]@{}} {
                        if ($value = $currentfolder.GetDetailsOf($item, $_))
                        {
                            if ($propname = $currentfolder.GetDetailsOf($null, $_))
                            {
                                $ht.Add($propname, $value)
                            }
                        }
                    } -End {[PSCustomObject]$ht}
                }
            }
            catch
            {
                Write-Warning "Error while processing $($item.fullname) : $($_.exception.message)"
            }
        }
    }
    end
    {
        $shell = $null
    }
}
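
Example usage could look like this (paths are placeholders); you can pipe FileInfo objects in from Get-ChildItem or pass a path string directly:

# pipe files in from Get-ChildItem ...
Get-ChildItem -Path 'C:\test' -Filter *.mp4 | Get-FileMetaData | Out-GridView
# ... or pass a single path string
Get-FileMetaData -InputObject 'C:\test\videotocheck.mp4'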

Segmentation fault in the below program

The below code is being called from a simple script like this.
$test.line-validation();
method line-validation is rw {
    my $file-data = slurp($!FileName, enc => "iso-8859-1");
    my @lines = $file-data.lines;
    my $start = now;
    for @lines -> $line {
        state $i = 1;
        my @splitLine = split('|', $line);
        if ($line.starts-with("H|") || $line.starts-with("T|")) {
            next;
        }
        my $lnObject = LineValidation.new( line => $line, FileType => $.FileType );
        $lnObject.ColumnIds = %.ColumnIds;
        my @promises;
        my @validationIds;
        for %.ValidationRules.keys -> $validationId {
            if (%.ValidationRules{$validationId}<ValidationType> eq 'COLUMN') {
                push @promises, start { $lnObject.ColumnValidationFunction(%.ValidationRules{$validationId}<ValidationFunction>, %.ValidationRules{$validationId}<Arguments>, $.ValidationRules{$validationId}<Description>); 1 };
                push @validationIds, $validationId;
            }
        }
        my @promise-output = await @promises;
        for @validationIds -> $valId {
            state $j = 0;
            my $result = @promise-output[$j];
            if ($result.Bool == True) {
                if (%.ResultSet{$valId}<count> :!exists) {
                    %.ResultSet{$valId}<count> = 1;
                } else {
                    %.ResultSet{$valId}<count> = %.ResultSet{$valId}<count> + 1;
                }
                my @prCol = (%.ValidationRules{$valId}<Arguments><column>, @.printColumns);
                if (%.ResultSet{$valId}<count> <= 10) {
                    %.ResultSet{$valId}.push: (sample => join('|', @splitLine[@prCol[*;*]].map: { if ($_.Bool == False ) { $_ = '' } else { $_ = $_; } }));
                }
                %.ResultSet{$valId}<ColumnList> = @prCol[*;*];
            }
            $j++;
        }
        $i++;
    }
    say "Line validation completed in {now - $start } time for $.lineCount lines";
}
The code was working fine earlier, but when run with larger files it arbitrarily throws the error Segmentation fault and exits. I cannot determine where it is failing either.

Auto response after #seconds & #ofmessagelines

I want to count the number of message lines and the amount of time that has passed.
Main script:
menu channel {
Announce
.Start: .timerAnnounce. $+ $chan 0 11 msg $chan $$?="Hi msg"
.Stop: .timerAnnounce. $+ $chan off
}
Example given:
ON *:TEXT:*:#: {
  if (%announce) {
    inc %msgcounter 1
    if (%msgcounter >= 10) {
      if ($calc($ctime - %announce) >= 600) {
        msg #chan Your msg here
        unset %msgcounter
        set %announce $ctime
      }
    }
  }
}

tail -f | awk and end tail once data is found

I am trying to build a script that runs tail -f | awk on a log file which is updated every second. The awk part fetches only the required part of the log file based on my search parameter. The output XML is also captured in an output file. The script is working fine - as expected.
Issue: even after the search has completed, the script stays hung because of tail -f. Any idea how to update the script below so that once the output XML is captured, the tail part is stopped?
XMLF=/appl/logs/abc.log
aa_pam=${1-xml}
[[ ${2-xml} = "xml" ]] && tof=xml_$(date +%Y%m%d%H%M%S).xml || tof=$2
tail -f $XMLF | \
awk ' BEGIN { Print_SW=0; Cnt_line=1; i=0}
/\<\?xml version\=/ { if (Print_SW==1) p_out(Cnt_Line,i)
Print_SW=0; Cnt_line=1;
}
{ Trap_arry[Cnt_line++]=$0;
}
/'${1-xml}'/ { Print_SW=1;
}
/\<\/XYZ_999/ { if (Print_SW==1) p_out(Cnt_Line, i);
Print_SW=0; Cnt_line=1; }
END { if (Print_SW==1) p_out(Cnt_Line, i); }
function p_out(Cnt_Line, i) {
for (i=1;i<Cnt_line;i++) {print Trap_arry[i] | "tee '$tof'" }
}
' | tee $tof
Update: I tried the suggestion below of using exit - it exits the script successfully - however the XML captured in the output is duplicated, so the same XML appears twice in the output file.
XMLF=/appl/logs/abc.log
aa_pam=${1-xml}
[[ ${2-xml} = "xml" ]] && tof=xml_$(date +%Y%m%d%H%M%S).xml || tof=$2
tail -f $XMLF | \
awk ' BEGIN { Print_SW=0; Cnt_line=1; i=0}
/\<\?xml version\=/ { if (Print_SW==1) p_out(Cnt_Line,i)
Print_SW=0; Cnt_line=1;
}
{ Trap_arry[Cnt_line++]=$0;
}
/'${1-xml}'/ { Print_SW=1;
}
/\<\/XYZ_999/ { if (Print_SW==1) p_out(Cnt_Line, i);
Print_SW=0; Cnt_line=1; }
END { if (Print_SW==1) p_out(Cnt_Line, i); }
function p_out(Cnt_Line, i) {
for (i=1;i<Cnt_line;i++) {print Trap_arry[i] | "tee '$tof'" } { exit }
}
' | tee $tof
Call exit (which will jump to your END block prior to termination) after you are finished capturing your output.
When awk terminates, the next write() to stdout by tail -f will result in an EPIPE error. tail knows to terminate when that happens.
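
As a minimal illustration of that mechanism (log path and pattern are placeholders), the following pipeline ends by itself: once awk calls exit, the next line tail -f writes hits the closed pipe and tail terminates:

tail -f /appl/logs/abc.log | awk '/<\/XYZ_999/ { print; exit }'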
UPDATE: You seem to be having some problem trying to decide where to put the exit. It should not be in p_out because you call p_out from both the closing XML tag match expression and from the END block. Try this instead:
XMLF=/appl/logs/abc.log
aa_pam=${1-xml}
[[ ${2-xml} = "xml" ]] && tof=xml_$(date +%Y%m%d%H%M%S).xml || tof=$2
tail -f $XMLF | \
awk '
BEGIN {
Print_SW=0
Cnt_line=1
i=0
}
/\<\?xml version\=/ {
if (Print_SW==1)
p_out(Cnt_Line,i)
Print_SW=0
Cnt_line=1
}
{
Trap_arry[Cnt_line++]=$0
}
/'${1-xml}'/ {
Print_SW=1;
}
/\<\/XYZ_999/ {
if (Print_SW==1)
p_out(Cnt_Line, i)
Print_SW=0
Cnt_line=1
exit
}
END {
if (Print_SW==1)
p_out(Cnt_Line, i);
}
function p_out(Cnt_Line, i) {
for (i=1;i<Cnt_line;i++) {
print Trap_arry[i] | "tee '$tof'"
}
}
' | tee $tof
You could, in the awk script, add a line such as:
/some-end-of-xml-marker/ { close("/dev/stdin") }
I didn't try it, but you get the idea: close stdin once you reach the end of the XML, so that awk's input loop stops and you get to the END part (not tested, I hope this proves to be correct...).
Based on the question "How to break a tail -f command in bash" you could try:
#! /bin/bash
XMLF=/appl/logs/abc.log
aa_pam=${1-xml}
[[ ${2-xml} = "xml" ]] && tof=xml_$(date +%Y%m%d%H%M%S).xml || tof=$2
mkfifo log.pipe
tail -f "$XMLF" > log.pipe & tail_pid=$!
awk -vpar1="$aa_pam" -vtof="$tof" -f t.awk < log.pipe
kill $tail_pid
rm log.pipe
where t.awk is:
/<\?xml version\=/ {
if (Print_SW==1) {
p_out(Cnt_Line)
}
Print_SW=0
Cnt_line=0
}
{
Trap_arry[++Cnt_line]=$0
}
$0 ~ par1 {
Print_SW=1;
}
/<\/XYZ_999/ {
if (Print_SW==1)
p_out(Cnt_Line)
Print_SW=0
Cnt_line=0
}
function p_out(Cnt_Line, i) {
for (i=1; i<Cnt_line; i++) {
print Trap_arry[i] | ("tee " tof)
}
exit 1
}
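
Assuming the wrapper above is saved as, say, capture_xml.sh (the name is only for illustration), it would be called with the search parameter as the first argument and, optionally, an output file name as the second:

# arg 1 becomes par1 (the awk search pattern); arg 2 is the output file, else a timestamped xml_*.xml is used
./capture_xml.sh SEARCH_STRING output.xml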