How to connect and read values from Kepware using OPCAutomation.dll

I am creating a small C# program to connect to and read values from a Kepware server using OPCAutomation.dll, but I can't work out the syntax:
OPCAutomation.OPCServer _OPCServer = new OPCAutomation.OPCServer();
_OPCServer.connect("", ""......);
what values will come inside these brackets?

OPCAutomation.OPCServer _OPCServer = new OPCAutomation.OPCServer();
_OPCServer.connect("Kepware.KEPServerEX.V5", "");
The second parameter is the OPC Server node and can be left String.Empty.
From Reflector:
public virtual extern void Connect([In, MarshalAs(UnmanagedType.BStr)] string ProgID, [In, Optional, MarshalAs(UnmanagedType.Struct)] object Node);
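If the server runs on another machine, the Node argument takes that machine's name. A minimal sketch, assuming DCOM is configured and "PLC01" is a hypothetical host name:
OPCAutomation.OPCServer _OPCServer = new OPCAutomation.OPCServer();
// the second argument is the remote node (machine name); "PLC01" is hypothetical
_OPCServer.Connect("Kepware.KEPServerEX.V5", "PLC01");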
I'm adding an example that reads and writes values:
// set up some variables
OPCServer ConnectedOpc = new OPCServer();
Array OPCItemIDs = Array.CreateInstance(typeof(string), 10);
Array ItemServerHandles = Array.CreateInstance(typeof(Int32), 10);
Array ItemServerErrors = Array.CreateInstance(typeof(Int32), 10);
Array ClientHandles = Array.CreateInstance(typeof(Int32), 10);
Array RequestedDataTypes = Array.CreateInstance(typeof(Int16), 10);
Array AccessPaths = Array.CreateInstance(typeof(string), 10);
OPCGroup OpcGroupNames;
// connect to KepServerEX
ConnectedOpc.Connect("Kepware.KEPServerEX.V5", "");
// Add tags and OPC group.
// set up the tags
OPCItemIDs.SetValue("Counting.PLC.Station1.LoggedON", 1);
OPCItemIDs.SetValue("Counting.PLC.Station2.LoggedON", 2);
OPCItemIDs.SetValue("Counting.PLC.Station3.LoggedON", 3);
OPCItemIDs.SetValue("Counting.PLC.Station1.Operator", 4);
OPCItemIDs.SetValue("Counting.PLC.Station2.Operator", 5);
OPCItemIDs.SetValue("Counting.PLC.Station3.Operator", 6);
// set up the opc group
OpcGroupNames = ConnectedOpc.OPCGroups.Add("Group01");
OpcGroupNames.DeadBand = 0;
OpcGroupNames.UpdateRate = 100;
OpcGroupNames.IsSubscribed = true;
OpcGroupNames.IsActive = true;
OpcGroupNames.OPCItems.AddItems(6, ref OPCItemIDs, ref ClientHandles, out ItemServerHandles, out ItemServerErrors, RequestedDataTypes, AccessPaths);
// Read the values from the server for those tags.
// read
Array ItemServerReadValues = Array.CreateInstance(typeof(string), 10);
object qualities;
object timestamps;
OpcGroupNames.SyncRead((short)OPCAutomation.OPCDataSource.OPCDevice, 6, ref ItemServerHandles, out ItemServerReadValues, out ItemServerErrors, out qualities, out timestamps);
// note: the arrays the COM server returns are 1-based, so values 4-6 are the Operator tags
Console.WriteLine((string)ItemServerReadValues.GetValue(4));
Console.WriteLine((string)ItemServerReadValues.GetValue(5));
Console.WriteLine((string)ItemServerReadValues.GetValue(6));
// Write some values into the server for those tags.
// write
Array ItemServerWriteValues = Array.CreateInstance(typeof(object), 7);
ItemServerWriteValues.SetValue(1, 1);
ItemServerWriteValues.SetValue(1, 2);
ItemServerWriteValues.SetValue(1, 3);
ItemServerWriteValues.SetValue("Test Op 1", 4);
ItemServerWriteValues.SetValue("Test Op 2", 5);
ItemServerWriteValues.SetValue("Test Op 3", 6);
OpcGroupNames.SyncWrite(6, ref ItemServerHandles, ref ItemServerWriteValues, out ItemServerErrors);
This example is adapted from http://lifeisunderconstruction.blogspot.mx/2011/03/opc-client-in-asp-net-c.html; I have included it here in case the link gets broken.
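One thing the example never does is disconnect. When you are finished, release the group and the connection (a minimal sketch using the same variables as above):
// tidy up: remove the groups and drop the server connection
ConnectedOpc.OPCGroups.RemoveAll();
ConnectedOpc.Disconnect();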


How to pass a param for a binding in PostgreSQL - COPY (... ) TO STDOUT (FORMAT binary)?

I have a simple test table in Postgres, like below:
--DROP TABLE test_point
CREATE TABLE test_point
(
    serie_id INT NOT NULL,
    version_ts INT NOT NULL,
    PRIMARY KEY (serie_id, version_ts)
);
I'm trying to load data from it using COPY TO STDOUT and binary buffers. This is the SQL definition I use in the test case:
COPY (
    SELECT version_ts
    FROM test_point
    WHERE
        serie_id = $1::int
) TO STDOUT (FORMAT binary);
It works OK if I don't provide any param to bind in the SQL. If I use a simple SELECT, it recognizes the params as well.
I also tried providing explicit info about the param type during statement preparation, but the results were similar (it doesn't recognize the param).
This is a message I receive during the test case:
0x000001740a288ab0 "ERROR: bind message supplies 1 parameters, but prepared statement \"test1\" requires 0\n"
How do I properly provide a param for a COPY() statement?
I don't want to cut/concatenate strings for timestamp params and similar types.
Below is a test case showing the issue.
TEST(TSStorage, CopyParamTest)
{
    auto sql = R"(
        COPY (
            SELECT version_ts
            FROM test_point
            WHERE
                serie_id = $1::int
        ) TO STDOUT (FORMAT binary);
    )";

    auto connPtr = PQconnectdb("postgresql://postgres:pswd@localhost/some_db");
    auto result = PQprepare(connPtr, "test1", sql, 0, nullptr);

    // Lambda to test the result status
    auto testRes = [&](ExecStatusType status)
    {
        if (PQresultStatus(result) != status)
        {
            PQclear(result);
            auto errorMsg = PQerrorMessage(connPtr);
            PQfinish(connPtr);
            throw std::runtime_error(errorMsg);
        }
    };

    testRes(PGRES_COMMAND_OK);
    PQclear(result);

    int seriesIdParam = htonl(5);
    const char *paramValues[] = {(const char *)&seriesIdParam};
    const int paramLengths[] = {sizeof(seriesIdParam)};
    const int paramFormats[] = {1}; // 1 means binary

    // Execute the prepared statement
    result = PQexecPrepared(connPtr,
                            "test1",
                            1, // nParams
                            paramValues,
                            paramLengths,
                            paramFormats,
                            1); // output format: binary

    // Ensure it's in COPY_OUT state
    //testRes(PGRES_COPY_OUT);
    if (PQresultStatus(result) != PGRES_COPY_OUT)
    {
        auto errorMsg = PQerrorMessage(connPtr);
        int set_breakpoint_here = 0; // !!! !!! !!!
    }

    PQclear(result);
    PQfinish(connPtr);
}

Compile all values across multiple delimited strings into a table

I am collecting responses to an online survey form in a table like this:
CREATE TABLE [Survey]
(
    ID int IDENTITY(1,1) NOT NULL,
    UserName varchar(50) NOT NULL,
    Responses varchar(max) NOT NULL,
    Taken datetime NOT NULL
)
When the user clicks the submit button, a process grabs all the checkboxes that were clicked, concatenates their names into a delimited string, and stuffs that into the table along with the other fields. Essentially the same as:
INSERT INTO [Survey] (UserName, Responses, Taken) VALUES ('John', 'chkSize', GetDate())
INSERT INTO [Survey] (UserName, Responses, Taken) VALUES ('Mary', 'chkSquare;chkSoft', GetDate())
INSERT INTO [Survey] (UserName, Responses, Taken) VALUES ('Steve', 'chkSize;chkYellow;chkRound', GetDate())
INSERT INTO [Survey] (UserName, Responses, Taken) VALUES ('April', 'chkRound;chkStacked;chkFiltered;chkBrown', GetDate())
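For reference, the concatenation step can be as simple as this minimal sketch (the CheckBox controls and the pnlSurvey container are assumptions, not my actual code):
// collect the names of all checked boxes into one ';'-delimited string
string responses = string.Join(";", pnlSurvey.Controls.OfType<CheckBox>()
                                             .Where(cb => cb.Checked)
                                             .Select(cb => cb.Name));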
Is there a way to easily go through all the "Responses" for the whole table, find all possible values, and then return them as a unique list in their own table? I.e.:
chkBrown
chkFiltered
chkRound
chkSize
chkSoft
chkSquare
chkStacked
chkYellow
You can do what you want using string_split():
select s.value, count(*)
from survey su cross apply
string_split(su.responses, ';') s
group by s.value;
Here is a db<>fiddle.
The fact that you can do this does not mean that you should. You should store the responses in a separate table, with one row per response.
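If you do normalize, the insert path is one row per checked box. A minimal sketch (the SurveyResponse table, its columns, and the surrounding variables are hypothetical):
// one row per response in a SurveyResponse(SurveyID, Response) child table
foreach (var response in responsesString.Split(';'))
{
    using (var cmd = new SqlCommand(
        "INSERT INTO SurveyResponse (SurveyID, Response) VALUES (@id, @response)", connection))
    {
        cmd.Parameters.AddWithValue("@id", surveyId);
        cmd.Parameters.AddWithValue("@response", response);
        cmd.ExecuteNonQuery();
    }
}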
If this is just a simple one-page, checkbox-only survey, one approach is:
// declare flags enum
[Flags]
public enum Checkboxes : int
{
    none = 0,
    chkBrown = 1,
    chkFiltered = 2,
    chkRound = 4,
    chkSize = 8,
    chkSoft = 16,
    chkSquare = 32,
    chkStacked = 64,
    chkYellow = 128
}
// on initialize/constructor add these values to your checkbox tags
chkBrown.Tag = Checkboxes.chkBrown;
// Add a checkbox extension (extension methods must live in a static class)
public static Checkboxes GetCode(this CheckBox cb)
{
    if (cb.Checked)
        return (Checkboxes)cb.Tag;
    return Checkboxes.none;
}
// your db value would be
Checkboxes val = chkBrown.GetCode() | chkFiltered.GetCode(); // . . . OR together all c-boxes here
// make the db field an integer and save this value:
(int)val
But... again, this is only good if no changes will be required and the system is static. This seems to be homework, not a long-term concern. In the long-term scenario, the answer is a many-to-many table, where each response to a question is posted as a separate record. That way, SQL searches are easy.
Here is a working fiddle where you can also see how to set your checkboxes from a value retrieved as a number:
using System;

public class ClsVal // instead of a checkbox
{
    public bool A {get; set;}
    public Checkboxes C {get; set;}
}

public static class ClsValExt
{
    public static Checkboxes GetCode(this ClsVal cb)
    {
        if (cb.A)
            return cb.C;
        return Checkboxes.none;
    }
}

[Flags]
public enum Checkboxes : int
{
    none = 0,
    chkBrown = 1,
    chkFiltered = 2,
    chkRound = 4,
    chkSize = 8,
    chkSoft = 16,
    chkSquare = 32,
    chkStacked = 64,
    chkYellow = 128
}

public class Program
{
    public static void Main()
    {
        var c1 = new ClsVal() {A = true, C = Checkboxes.chkBrown};
        var c2 = new ClsVal() {A = true, C = Checkboxes.chkFiltered};
        var c3 = new ClsVal() {A = false, C = Checkboxes.chkRound};
        var c4 = new ClsVal() {A = true, C = Checkboxes.chkSize};
        var x = c2.GetCode() | c1.GetCode() | c3.GetCode() | c4.GetCode();
        var i = (int)x;
        Console.WriteLine(i);
        Console.WriteLine((x & Checkboxes.chkBrown) == Checkboxes.chkBrown);  // True
        Console.WriteLine((x & Checkboxes.chkBrown) == Checkboxes.chkYellow); // False
    }
}
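Going the other way, i.e. restoring checkbox state from the integer read back from the DB, is just a flag test:
// decode the stored int back into per-checkbox states
var saved = (Checkboxes)i; // i == 11 here (chkBrown | chkFiltered | chkSize)
Console.WriteLine(saved.HasFlag(Checkboxes.chkBrown)); // True
Console.WriteLine(saved.HasFlag(Checkboxes.chkRound)); // False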

What is the right way to get term positions in a Lucene document?

The example in this question and some others I've seen on the web use the postings method of a term vector to get term positions. Copy-pasted from the example in the linked question:
IndexReader ir = obtainIndexReader();
Terms tv = ir.getTermVector( doc, field );
TermsEnum terms = tv.iterator();
PostingsEnum p = null;
while( terms.next() != null ) {
    p = terms.postings( p, PostingsEnum.ALL );
    while( p.nextDoc() != PostingsEnum.NO_MORE_DOCS ) {
        int freq = p.freq();
        for( int i = 0; i < freq; i++ ) {
            int pos = p.nextPosition();   // Always returns -1!!!
            BytesRef data = p.getPayload();
            doStuff( freq, pos, data );   // Fails miserably, of course.
        }
    }
}
This code works for me, but what drives me mad is that the Terms type is where the position information is kept. All the documentation I've seen keeps saying that term vectors keep position data. However, there are no methods on this type to get that information!
Older versions of Lucene apparently had a method, but as of at least version 6.5.1 of Lucene, that is no longer the case.
Instead, I'm supposed to use the postings method and traverse the documents, but I already know which document I want to work on!
The API documentation does not say anything about postings returning only the current document (the one the term vector belongs to), but when I run it, I only get the current doc.
Is this the correct and only way to get position data from term vectors? Why such an unintuitive API? Is there a document that explains why the previous approach changed in favour of this?
Don't know about "right or wrong" but for version 6.6.3 this seems to work.
private void run() throws Exception {
    Directory directory = new RAMDirectory();
    IndexWriterConfig indexWriterConfig = new IndexWriterConfig(new StandardAnalyzer());
    IndexWriter writer = new IndexWriter(directory, indexWriterConfig);

    Document doc = new Document();
    // Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.YES
    FieldType type = new FieldType();
    type.setStoreTermVectors(true);
    type.setStoreTermVectorPositions(true);
    type.setStoreTermVectorOffsets(true);
    type.setIndexOptions(IndexOptions.DOCS);

    Field fieldStore = new Field("tags", "foo bar and then some", type);
    doc.add(fieldStore);
    writer.addDocument(doc);
    writer.close();

    DirectoryReader reader = DirectoryReader.open(directory);
    IndexSearcher searcher = new IndexSearcher(reader);

    Term t = new Term("tags", "bar");
    Query q = new TermQuery(t);
    TopDocs results = searcher.search(q, 1);

    for ( ScoreDoc scoreDoc : results.scoreDocs ) {
        Fields termVs = reader.getTermVectors(scoreDoc.doc);
        Terms f = termVs.terms("tags");
        TermsEnum te = f.iterator();
        PostingsEnum docsAndPosEnum = null;
        BytesRef bytesRef;
        while ( (bytesRef = te.next()) != null ) {
            docsAndPosEnum = te.postings(docsAndPosEnum, PostingsEnum.ALL);
            // for each term (iterator next) in this field (field)
            // iterate over the docs (should only be one)
            int nextDoc = docsAndPosEnum.nextDoc();
            assert nextDoc != DocIdSetIterator.NO_MORE_DOCS;
            final int fr = docsAndPosEnum.freq();
            final int p = docsAndPosEnum.nextPosition();
            final int o = docsAndPosEnum.startOffset();
            System.out.println("p=" + p + ", o=" + o + ", l=" + bytesRef.length + ", f=" + fr + ", s=" + bytesRef.utf8ToString());
        }
    }
}

Hash map iteration

How do I iterate through a hash map to find the first 10 elements? For example, if my map contains strings as keys and ints as values, I want to fetch the 10 entries with the highest values.
Let's say we have a Map and we are allowed to use an external library, in this case Guava:
Map<String, Integer> map = Maps.newTreeMap();
map.put("A", 13);
map.put("B", 11);
map.put("C", 27);
map.put("D", 38);
map.put("E", 25);
map.put("F", 12);
map.put("G", 25);
map.put("D", 35);
map.put("H", 28);
map.put("R", 13);
map.put("N", 24);
map.put("T", 37);
Create a Guava Multimap and add the entries from the original map:
Multimap<String, Integer> multiMap = ArrayListMultimap.create();
for (String key : map.keySet()) {
    multiMap.put(key, map.get(key));
}
Invert multiMap into a TreeMultimap whose keys are sorted in descending order, so the highest values come first:
TreeMultimap<Integer, String> reversed = TreeMultimap.create(Ordering.natural().reverse(), Ordering.natural());
Multimaps.invertFrom(multiMap, reversed);
Create a List from the entries and take the first 10 elements:
Lists.newArrayList(reversed.entries()).subList(0, 10)
If it's a one-time thing, you can sort the HashMap by converting it to a List and then back into a LinkedHashMap:
Map<String, Integer> map = new HashMap<>();
map.put("A", 13);
map.put("B", 11);
map.put("C", 27);
map.put("D", 38);
map.put("E", 25);
map.put("F", 12);
map.put("G", 25);
map.put("D", 35);
map.put("H", 28);
map.put("R", 13);
map.put("N", 24);
map.put("T", 37);
// Take a List copy of the Map
List<Entry<String, Integer>> list = new ArrayList<Entry<String, Integer>>(map.entrySet());
// Sort the list by the value, descending, so the highest values come first
Collections.sort(list, new Comparator<Entry<String, Integer>>() {
    @Override
    public int compare(Entry<String, Integer> o1, Entry<String, Integer> o2) {
        return (o2.getValue()).compareTo(o1.getValue());
    }
});
// Create new Map (use LinkedHashMap to maintain order)
Map<String, Integer> sortedMap = new LinkedHashMap<String, Integer>();
for (Entry<String, Integer> entry : list) {
    sortedMap.put(entry.getKey(), entry.getValue());
}
Most HashMaps don't preserve any kind of order, so you might need to read all the keys, sort them, and then get the corresponding values from the hash. If you can tell us what language you're using and provide some sample code, someone might be able to help further.

Amazon RDS w/ SQL Server won't allow bulk insert from CSV source

I've tried two methods and both fall flat...
BULK INSERT TEMPUSERIMPORT1357081926
FROM 'C:\uploads\19E0E1.csv'
WITH (FIELDTERMINATOR = ',',ROWTERMINATOR = '\n')
This fails with "You do not have permission to use the bulk load statement.", and you cannot enable that SQL role with Amazon RDS.
So I tried using OPENROWSET, but it requires ad hoc queries to be enabled, which I don't have permission to do either!
I know this question is really old, but it was the first one that came up when I searched for bulk inserting into an AWS SQL Server RDS instance. Things have changed, and you can now do it after integrating the RDS instance with S3. I answered this in more detail on another question, but the overall gist is: set up the instance with the proper role, put your file on S3, and then you can copy the file over to RDS with the following commands:
exec msdb.dbo.rds_download_from_s3
    @s3_arn_of_file='arn:aws:s3:::bucket_name/bulk_data.csv',
    @rds_file_path='D:\S3\seed_data\data.csv',
    @overwrite_file=1;
Then BULK INSERT will work (the target table name below is a placeholder):
BULK INSERT YourTable
FROM 'D:\S3\seed_data\data.csv'
WITH
(
    FIRSTROW = 2,
    FIELDTERMINATOR = ',',
    ROWTERMINATOR = '\n'
)
AWS doc
You can enable ad hoc distributed queries by heading to your AWS Management Console, navigating to the RDS menu, creating a DB parameter group with Ad Hoc Distributed Queries set to 1, and then attaching this parameter group to your DB instance.
Don't forget to reboot your DB once you have made these changes.
Here is the source of my information:
http://blogs.lessthandot.com/index.php/datamgmt/dbadmin/turning-on-optimize-for-ad/
Hope this helps you.
2022
I'm adding this for anyone like me who wants to quickly insert data into RDS from C#.
While RDS allows CSV bulk uploads directly from S3, there are times when you just want to upload data straight from your program.
I've written a C# utility method which builds the statements with a StringBuilder and does 2000 inserts per call, which is far faster than an ORM like Dapper doing one insert per call.
This method should handle date, int, double, and varchar fields, but I haven't had to use it for exotic character escaping or anything like that.
// call as (rows can be anonymous objects, as long as they all have the same shape)
FastInsert.Insert(MyDbConnection, new object[] { new { someField = "someValue" } }, "my_table");
// requires: System.Data, System.Linq, System.Reflection, System.Text, and Dapper (for connection.Execute)
class FastInsert
{
    static int rowSize = 2000;

    internal static void Insert(IDbConnection connection, object[] data, string targetTable)
    {
        var props = data[0].GetType().GetProperties();
        var names = props.Select(x => x.Name).ToList();
        foreach (var batch in data.Batch(rowSize))
        {
            var sb = new StringBuilder($"insert into {targetTable} ({string.Join(",", names)})");
            string lastLine = "";
            foreach (var row in batch)
            {
                sb.Append(lastLine);
                // CreateSQLString already quotes each value, so join with bare commas
                var values = props.Select(prop => CreateSQLString(row, prop));
                lastLine = $"select {string.Join(",", values)} union all ";
            }
            // trim the trailing "union all"; "from dual" is MySQL/Oracle syntax, append nothing here for SQL Server
            lastLine = lastLine.Substring(0, lastLine.Length - " union all ".Length) + " from dual";
            sb.Append(lastLine);
            var fullQuery = sb.ToString();
            connection.Execute(fullQuery); // Execute is Dapper's IDbConnection extension
        }
    }

    private static string CreateSQLString(object row, PropertyInfo prop)
    {
        var value = prop.GetValue(row);
        if (value == null) return "null";
        if (prop.PropertyType == typeof(DateTime))
        {
            return $"'{((DateTime)value).ToString("yyyy-MM-dd HH:mm:ss")}'";
        }
        // everything else (string, int, double) is quoted, with embedded quotes doubled
        return $"'{value.ToString().Replace("'", "''")}'";
    }
}
static class Extensions
{
    // split an IEnumerable into batches of the given size
    public static IEnumerable<T[]> Batch<T>(this IEnumerable<T> source, int size)
    {
        T[] bucket = null;
        var count = 0;
        foreach (var item in source)
        {
            if (bucket == null)
                bucket = new T[size];
            bucket[count++] = item;
            if (count != size)
                continue;
            yield return bucket;
            bucket = null;
            count = 0;
        }
        // Return the last bucket with all remaining elements
        if (bucket != null && count > 0)
        {
            Array.Resize(ref bucket, count);
            yield return bucket;
        }
    }
}
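A minimal usage sketch: note that connection.Execute above is Dapper's IDbConnection extension, so the Dapper package (or your own Execute helper) is assumed, as are the connection string and column names:
using System;
using System.Data.SqlClient;
using System.Linq;
using Dapper;

var rows = Enumerable.Range(0, 5000)
    .Select(i => (object)new { Id = i, Name = $"name {i}", Created = DateTime.UtcNow })
    .ToArray();
using (var connection = new SqlConnection("<your RDS connection string>"))
{
    connection.Open();
    FastInsert.Insert(connection, rows, "my_table"); // runs ceil(5000/2000) = 3 batched statements
}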