About SBC function rstan::sbc - rstan

Here I use the notation from the example SBC code below.
I have two questions about rstan::sbc.
Is it possible to extract the samples y generated by y = binomial_rng(N, pi_); in the transformed data block?
Is it possible to plot the rank statistics of user-specified parameters only?
If there are multiple parameters, such as:
parameters {
real<lower = 0, upper = 1> pi;
real<lower = 0, upper = 1> ppi;
real<lower = 0, upper = 1> pppi;
}
then I want to plot the rank statistics of the specified parameters only, e.g., in the following manner:
plot(output, pars = c("pi", "ppi"))
where output is a return value of rstan::sbc.
Example of SBC
data {
int<lower = 1> N;
real<lower = 0> a;
real<lower = 0> b;
}
transformed data { // these adhere to the conventions above
real pi_ = beta_rng(a, b);
int y = binomial_rng(N, pi_);
}
parameters {
real<lower = 0, upper = 1> pi;
}
model {
target += beta_lpdf(pi | a, b);
target += binomial_lpmf(y | N, pi);
}
generated quantities { // these adhere to the conventions above
int y_ = y;
vector[1] pars_;
int ranks_[1] = {pi > pi_};
vector[N] log_lik;
pars_[1] = pi_;
for (n in 1:y) log_lik[n] = bernoulli_lpmf(1 | pi);
for (n in (y + 1):N) log_lik[n] = bernoulli_lpmf(0 | pi);
}
Edit for comments
Let output be a return value of rstan::sbc; then output$Y is the following:
:
:
:
[[496]]
named numeric(0)
[[497]]
named numeric(0)
[[498]]
named numeric(0)
[[499]]
named numeric(0)
[[500]]
named numeric(0)
Edit for comments 2
Let stanmodel be an object of class stanmodel for the following .stan file. Then the resulting output$Y is not functional.
output <- rstan::sbc(stanmodel,
                     data = list(
                       ww = -0.81, www = 0.001,
                       mm = 0.65,  mmm = 0.001,
                       vv = 5.31,  vvv = 0.001,
                       zz = 1.55,  zzz = 0.001,
                       NL = 259, NI = 57, C = 3, c = 3:1, N = 3
                     ), M = 500, refresh = 0)
Stan file
data{ // SBC
//This is not prior-truth data, but some data needed to run
int <lower=0>N; //This is exactly the same as C
int <lower=0>NL; //Number of Binomial trials
int <lower=0>NI; //This is redundant
int <lower=0>C; // Number of confidence levels
int <lower=0>c[N]; //Each component means a confidence level
//Priors which should be specified
real www;
real mmm;
real vvv;
real zzz;
real zz;
real ww;
real vv;
real mm;
}
transformed data {
int h[C];
int f[C];
real w_ ;
real <lower=0>dz_[C-1] ;
real m_;
real <lower =0> v_;
real <lower=0,upper=1>p_[C];
real <lower=0>l_[C];
real <lower=0>dl_[C];
real z_[C];
real a_;
real <lower=0>b_;
w_ = normal_rng (ww,www);
for(cd in 1:C-1) dz_[cd] = normal_rng (zz,zzz);
m_ = normal_rng (mm,mmm);
v_ = normal_rng (vv,vvv);
a_=m_/v_;
b_=1/v_;
z_[1] = w_;
for (cd in 1:(C-1)) {
z_[cd+1] = z_[cd] + dz_[cd];
}
for(cd in 1 : C) { if (cd==C) {
p_[cd] = 1 - Phi((z_[cd] - m_)/v_);
}else{
p_[cd] = Phi((z_[cd+1] - m_)/v_)- Phi( (z_[cd] -m_)/v_);
}
}
for(cd in 1 : C) {l_[cd] = (-1)*log(Phi(z_[cd])); }
for(cd in 1:C){
if (cd==C) {dl_[cd]=fabs(l_[cd]-0);
}else{
dl_[cd]=fabs(l_[cd]-l_[cd+1]);
}
}
for(n in 1:N) {
h[n] = binomial_rng(NL, p_[c[n]]);
// fff[n] ~ poisson( l[c[n]]*NL);//Non-Chakraborty's model
f[n] = poisson_rng (dl_[c[n]]*NI);//Chakraborty's model //<------- caution: not n but c[n], 2019 Jun 21
// fff[n] ~ poisson( l[c[n]]*NI);//Non-Chakraborty's model
}
}
parameters{
real w;
real <lower =0>dz[C-1];
real m;
real <lower=0>v;
}
transformed parameters {
real <lower=0,upper=1>p[C];
real <lower=0>l[C];
real <lower=0>dl[C];
real z[C];
real a;
real b;
a=m/v;
b=1/v;
z[1] = w;
for (cd in 1:(C-1)) {
z[cd+1] = z[cd] + dz[cd];
}
for(cd in 1 : C) {
if (cd==C) { p[cd] = 1 - Phi((z[cd] -m)/v);
}else{
p[cd] = Phi((z[cd+1] -m)/v)- Phi((z[cd] -m)/v);
}
}
for(cd in 1 : C) { l[cd] = (-1)*log(Phi(z[cd])); }
for(cd in 1:C){
if (cd==C) {dl[cd] = fabs(l[cd]-0);
}else{
dl[cd] = fabs(l[cd]-l[cd+1]);
}
}
}
model{
for(n in 1:N) {
h[n] ~ binomial(NL, p[c[n]]);
// fff[n] ~ poisson( l[c[n]]*NL);//Non-Chakraborty's model
f[n] ~ poisson(dl[c[n]]*NI);//Chakraborty's model //<------- caution: not n but c[n], 2019 Jun 21
// fff[n] ~ poisson( l[c[n]]*NI);//Non-Chakraborty's model
}
// priors
w ~ normal(ww,www);
for(cd in 1:C-1) dz[cd] ~ normal(zz,zzz);
m ~ normal(mm,mmm);
v ~ normal(vv,vvv);
}
generated quantities { // these adhere to the conventions above
int h_[C];
int f_[C];
vector [3 + C - 1] pars_;
int ranks_[3 + C - 1];
ranks_[1] = w > w_;
ranks_[2] = m > m_;
ranks_[3] = v > v_;
for (cd in 1:(C - 1)) ranks_[cd+3] = dz[cd] > dz_[cd];
pars_[1] = w_;
pars_[2] = m_;
pars_[3] = v_;
for (cd in 1:(C - 1)) pars_[cd+3] = dz_[cd];
// Here I copy the prior predictive realizations (y_ in the simple example above), now denoted h_ and f_
h_ = h;
f_ = f;
}

The list produced by sbc has an element called Y that holds realizations of the prior predictive distribution. There is no option (yet) to plot a subset of parameters, but you could make your own plot fairly easily based on the original code
https://github.com/stan-dev/rstan/blob/develop/rstan/rstan/R/SBC.R#L96
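As a minimal sketch (not an official rstan API), a helper along those lines could look as follows. It assumes output$ranks is a list with one iterations-by-parameters indicator matrix per simulation, following the ranks_ convention in the generated quantities block, and that output$Y holds the prior predictive draws; the function and argument names are mine.

library(ggplot2)

plot_sbc_pars <- function(output, pars, thin = 3, bins = 20) {
  # Thin the saved iterations (as the original plot method does).
  thinner <- seq(from = 1L, to = nrow(output$ranks[[1]]), by = thin)
  # One rank per simulation and per parameter: how many thinned draws
  # exceeded the corresponding prior realization.
  u <- t(sapply(output$ranks, function(r) 1L + colSums(r[thinner, , drop = FALSE])))
  sel <- u[, pars, drop = FALSE]  # pars: column positions, or names if present
  labs <- if (is.character(pars)) pars else paste0("parameter ", pars)
  d <- data.frame(rank = as.vector(sel),
                  parameter = rep(labs, each = nrow(sel)))
  ggplot(d, aes(x = rank)) +
    geom_histogram(bins = bins) +
    facet_wrap(~ parameter)
}

# Hypothetical usage with the model in the question:
# plot_sbc_pars(output, pars = c(1, 2))          # by position, or
# plot_sbc_pars(output, pars = c("pi", "ppi"))   # if the columns carry those names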

Related

Running solve multiple times

I need to run solve three times. Each time, solve needs different input taken from different columns of a tuple. That is why I need to access the loop variable within OPL as a parameter and change that parameter on every loop iteration. Please suggest how to do that in ODM OPL.
(I am able to do this when running a standalone model with a physical .dat file, by introducing an int in the .dat file and changing its value on each loop iteration, but the same is not possible when running through an ODM application.)
You can do this using a scripting main() function:
.dat file:
param = 0; // This value is actually never used
.mod file:
tuple T {
int round1;
int round2;
}
T t = <1, 2>;
int param = ...;
dvar float x;
minimize x;
subject to { x >= param; }
main {
thisOplModel.generate();
var def = thisOplModel.modelDefinition;
var data = thisOplModel.dataElements;
for (var i = 0; i < 2; ++i) {
if (i == 0)
data.param = thisOplModel.t.round1;
else
data.param = thisOplModel.t.round2;
var opl = new IloOplModel(def, cplex);
opl.addDataSource(data);
opl.generate();
cplex.solve();
writeln("Round " + i + ": " + cplex.getObjValue() + ", " + data.param);
opl.end();
}
}
The scripting code modifies the data before creating a new model in each iteration. You have a more elaborate version of code like this in the cutstock_main.mod example that ships with CPLEX.
What Daniel wrote works fine. If you do not want the unnecessary .dat file, you could write
sub.mod
tuple T {
int round1;
int round2;
}
T t = <1, 2>;
int param = ...;
dvar float x;
minimize x;
subject to { x >= param; }
and then in another model that will be the main one:
tuple T {
int round1;
int round2;
}
T t = <1, 2>;
main {
thisOplModel.generate();
var src = new IloOplModelSource("sub.mod");
var def=new IloOplModelDefinition(src);
var data = new IloOplDataElements();
for (var i = 0; i < 2; ++i) {
if (i == 0)
data.param = thisOplModel.t.round1;
else
data.param = thisOplModel.t.round2;
var opl = new IloOplModel(def, cplex);
opl.addDataSource(data);
opl.generate();
cplex.solve();
writeln("Round " + i + ": " + cplex.getObjValue() + ", " + data.param);
opl.end();
}
}
which will give
Round 0: 1, 1
Round 1: 2, 2
and
tuple T {
int round1;
int round2;
}
T t = <1, 2>;
int solutions[0..1];
main {
thisOplModel.generate();
var src = new IloOplModelSource("sub.mod");
var def=new IloOplModelDefinition(src);
var data = new IloOplDataElements();
for (var i = 0; i < 2; ++i) {
if (i == 0)
data.param = thisOplModel.t.round1;
else
data.param = thisOplModel.t.round2;
var opl = new IloOplModel(def, cplex);
opl.addDataSource(data);
opl.generate();
cplex.solve();
writeln("Round " + i + ": " + cplex.getObjValue() + ", " + data.param);
thisOplModel.solutions[i]=opl.x.solutionValue;
opl.end();
}
writeln(thisOplModel.solutions);
}
which addresses your next question about populating tables and gives
Round 0: 1, 1
Round 1: 2, 2
[1 2]

Possibility to reduce nested loops

I want to bypass deep nesting of several for-loops when creating a list of all possible Transformations.
Right now, this block of code is used:
val allTransformations = ArrayList<Transformation>().apply {
for (moveZ in 0..4)
for (moveY in 0..4)
for (moveX in 0..4)
for (rotateZ in 0..3)
for (rotateY in 0..3)
for (rotateX in 0..3)
add(Transformation(rotateX, rotateY, rotateZ, moveX, moveY, moveZ))
}
Although this is quite straightforward, I was wondering if Kotlin provides other tools to write this in a single line.
I want to retrieve the same list with less code and see if this results in less clutter.
This solution can be invoked with:
loopOverRanges(0..4, 0..4, 0..4, 0..3, 0..3, 0..3) { result ->
// result[0], result[1], result[2], result[3], result[4], result[5]
}
This is defined as such:
fun loopOverRanges(
vararg ranges: IntRange,
function: (IntArray) -> Unit
) {
val result = IntArray(ranges.size) { index -> ranges[index].first }
val productOfRangeLengths = ranges
.map { it.toList().size }
.product()
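// note: product() is presumably a custom extension, e.g. fold(1) { acc, n -> acc * n }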
for (i in 0 until productOfRangeLengths) {
function(result)
result[0] += ranges[0].step
for (rangeIndex in 0 until ranges.size - 1) {
if (result[rangeIndex] > ranges[rangeIndex].last) { // carry once a digit passes the end of its range (== would skip each range's last value)
result[rangeIndex] = ranges[rangeIndex].first
result[rangeIndex + 1] += ranges[rangeIndex].step
}
}
}
}
It's questionable, to say the least, whether this improves readability. It removes the need for nesting, which can be useful for a large number of ranges. It does not give the instant recognition of my initial nested loops; it also hides the named parameters and may throw an IndexOutOfBounds when result[TOO_HIGH_INT] is retrieved.
It was a fun little investigation, but I lean towards not using it.
Here is how to do it with one loop, just like incrementing a counter with 6 digits: when the first digit overflows, carry to the second digit and reset the first, etc.
fun loopOverRanges(a:IntRange,b:IntRange,c:IntRange,d:IntRange,e:IntRange,f:IntRange) : ArrayList<Transformation>
{
val x = a.count() * b.count() * c.count() * d.count() * e.count() * f.count()
val list : ArrayList<Transformation> = ArrayList()
var rx = f.first
var ry = e.first
var rz = d.first
var mx = c.first
var my = b.first
var mz = a.first
for(i in 0 until x)
{
list.add(Transformation(rx,ry,rz,mx,my,mz))
when{
rx < f.last -> rx += 1
ry < e.last -> {
rx = f.first
ry += 1
}
rz < d.last -> {
rx = f.first
ry = e.first
rz += 1
}
mx < c.last -> {
rx = f.first
ry = e.first
rz = d.first
mx += 1
}
my < b.last -> {
rx = f.first
ry = e.first
rz = d.first
mx = c.first
my += 1
}
mz < a.last -> {
rx = f.first
ry = e.first
rz = d.first
mx = c.first
my = b.first
mz += 1
}
}
}
return list
}
It can be simplified as
fun loopOverRanges(a:IntRange,b:IntRange,c:IntRange,d:IntRange,e:IntRange,f:IntRange) : ArrayList<Transformation>
{
data class Digit(var value :Int, val range:IntRange)
val list : ArrayList<Transformation> = ArrayList()
val digits = arrayOf(Digit(a.first,a),Digit(b.first,b),Digit(c.first,c),Digit(d.first,d),Digit(e.first,e),Digit(f.first,f))
val x = digits.fold(1){acc,digit -> acc * digit.range.count() }
for(i in 0 until x)
{
list.add(Transformation(digits[5].value,digits[4].value,digits[3].value,digits[2].value,digits[1].value,digits[0].value))
val j = digits.indexOfFirst { it.value < it.range.last }
if(j >= 0)
{
for(k in 0 until j )
{
digits[k].value = digits[k].range.first
}
digits[j].value += 1
}
}
return list
}
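For comparison, here is a hedged sketch of a single-expression alternative that only uses Kotlin's built-in flatMap/map over the same ranges (the Transformation constructor is assumed to match the one in the question):

// Assumed to mirror the Transformation used in the question.
data class Transformation(
    val rotateX: Int, val rotateY: Int, val rotateZ: Int,
    val moveX: Int, val moveY: Int, val moveZ: Int
)

// Same cartesian product as the original nested loops, built as one expression.
val allTransformations: List<Transformation> =
    (0..4).flatMap { moveZ ->
        (0..4).flatMap { moveY ->
            (0..4).flatMap { moveX ->
                (0..3).flatMap { rotateZ ->
                    (0..3).flatMap { rotateY ->
                        (0..3).map { rotateX ->
                            Transformation(rotateX, rotateY, rotateZ, moveX, moveY, moveZ)
                        }
                    }
                }
            }
        }
    }

It still nests lambdas, but it keeps the named loop variables, needs no helper function, and cannot go out of bounds.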

Is there a way to generate 2D stretched mesh using CGAL?

I currently use CGAL to generate 2D Delaunay triangulations. One of the mesh control parameters is the maximum length of a triangle edge. The examples suggest that this parameter is a constant. I would like to know how this parameter can be made a function of something else, for example the spatial location.
I think Delaunay meshing with variable density is not directly supported by CGAL, although you could mesh your regions independently. Alternatively, you may have a look at http://www.geom.at/advanced-mesh-generation/ where I have implemented that as a callback function.
It doesn't look like CGAL provides an example of this, but the machinery is all there. The details get a little complicated, since the objects that control whether triangles need to be refined also have to understand the priority under which triangles get refined.
To do this, I copied Delaunay_mesh_size_criteria_2 to create a new class (Delaunay_mesh_user_criteria_2) that has a spatially varying sizing field. Buried in the class is a function (user_sizing_function) that can be implemented with a varying size field based on location. The code below compares the size of the longest edge of the triangle to the minimum of the sizing field at the three vertices, but you could use the size at the barycenter or circumcenter, or even send the entire triangle to the sizing function if you have a good way to compute the smallest allowable size on the triangle as a whole.
This is a starting point, although a better solution would:
refactor some things to avoid so much duplication with the existing Delaunay_mesh_size_criteria_2,
allow the user to pass in the sizing function as an argument to the criteria object, and
be shipped with CGAL.
template <class CDT>
class Delaunay_mesh_user_criteria_2 :
public virtual Delaunay_mesh_criteria_2<CDT>
{
protected:
typedef typename CDT::Geom_traits Geom_traits;
double sizebound;
public:
typedef Delaunay_mesh_criteria_2<CDT> Base;
Delaunay_mesh_user_criteria_2(const double aspect_bound = 0.125,
const Geom_traits& traits = Geom_traits())
: Base(aspect_bound, traits){}
// first: squared_minimum_sine
// second: size
struct Quality : public std::pair<double, double>
{
typedef std::pair<double, double> Base;
Quality() : Base() {};
Quality(double _sine, double _size) : Base(_sine, _size) {}
const double& size() const { return second; }
const double& sine() const { return first; }
// q1<q2 means q1 is prioritised over q2
// ( q1 == *this, q2 == q )
bool operator<(const Quality& q) const
{
if( size() > 1 )
if( q.size() > 1 )
return ( size() > q.size() );
else
return true; // *this is big but not q
else
if( q.size() > 1 )
return false; // q is big but not *this
return( sine() < q.sine() );
}
std::ostream& operator<<(std::ostream& out) const
{
return out << "(size=" << size()
<< ", sine=" << sine() << ")";
}
};
class Is_bad: public Base::Is_bad
{
public:
typedef typename Base::Is_bad::Point_2 Point_2;
Is_bad(const double aspect_bound,
const Geom_traits& traits)
: Base::Is_bad(aspect_bound, traits) {}
Mesh_2::Face_badness operator()(const Quality q) const
{
if( q.size() > 1 )
return Mesh_2::IMPERATIVELY_BAD;
if( q.sine() < this->B )
return Mesh_2::BAD;
else
return Mesh_2::NOT_BAD;
}
double user_sizing_function(const Point_2 p) const
{
// IMPLEMENT YOUR CUSTOM SIZING FUNCTION HERE.
// BUT MAKE SURE THIS RETURNS SOMETHING LARGER
// THAN ZERO TO ALLOW THE ALGORITHM TO TERMINATE
return std::abs(p.x()) + .025;
}
Mesh_2::Face_badness operator()(const typename CDT::Face_handle& fh,
Quality& q) const
{
typedef typename CDT::Geom_traits Geom_traits;
typedef typename Geom_traits::Compute_area_2 Compute_area_2;
typedef typename Geom_traits::Compute_squared_distance_2 Compute_squared_distance_2;
Geom_traits traits; /** #warning traits with data!! */
Compute_squared_distance_2 squared_distance =
traits.compute_squared_distance_2_object();
const Point_2& pa = fh->vertex(0)->point();
const Point_2& pb = fh->vertex(1)->point();
const Point_2& pc = fh->vertex(2)->point();
double size_bound = std::min(std::min(user_sizing_function(pa),
user_sizing_function(pb)),
user_sizing_function(pc));
double
a = CGAL::to_double(squared_distance(pb, pc)),
b = CGAL::to_double(squared_distance(pc, pa)),
c = CGAL::to_double(squared_distance(pa, pb));
double max_sq_length; // squared max edge length
double second_max_sq_length;
if(a<b)
{
if(b<c) {
max_sq_length = c;
second_max_sq_length = b;
}
else { // c<=b
max_sq_length = b;
second_max_sq_length = ( a < c ? c : a );
}
}
else // b<=a
{
if(a<c) {
max_sq_length = c;
second_max_sq_length = a;
}
else { // c<=a
max_sq_length = a;
second_max_sq_length = ( b < c ? c : b );
}
}
q.second = 0;
q.second = max_sq_length / (size_bound*size_bound);
// normalized by size bound to deal
// with size field
if( q.size() > 1 )
{
q.first = 1; // (do not compute sine)
return Mesh_2::IMPERATIVELY_BAD;
}
Compute_area_2 area_2 = traits.compute_area_2_object();
double area = 2*CGAL::to_double(area_2(pa, pb, pc));
q.first = (area * area) / (max_sq_length * second_max_sq_length); // (sine)
if( q.sine() < this->B )
return Mesh_2::BAD;
else
return Mesh_2::NOT_BAD;
}
};
Is_bad is_bad_object() const
{ return Is_bad(this->bound(), this->traits /* from the bad class */); }
};
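If it helps, here is a rough usage sketch (my own addition, not part of the original answer) showing how a criteria class like the one above can be handed to CGAL's standard 2D meshing driver. The typedefs follow the usual Mesh_2 examples, and the square domain is only a hypothetical placeholder:

#include <CGAL/Exact_predicates_inexact_constructions_kernel.h>
#include <CGAL/Constrained_Delaunay_triangulation_2.h>
#include <CGAL/Triangulation_data_structure_2.h>
#include <CGAL/Triangulation_vertex_base_2.h>
#include <CGAL/Delaunay_mesh_face_base_2.h>
#include <CGAL/Delaunay_mesh_criteria_2.h>
#include <CGAL/Delaunay_mesher_2.h>
#include <iostream>
// ... plus the Delaunay_mesh_user_criteria_2 class defined above.

typedef CGAL::Exact_predicates_inexact_constructions_kernel K;
typedef CGAL::Triangulation_vertex_base_2<K>                Vb;
typedef CGAL::Delaunay_mesh_face_base_2<K>                  Fb;
typedef CGAL::Triangulation_data_structure_2<Vb, Fb>        Tds;
typedef CGAL::Constrained_Delaunay_triangulation_2<K, Tds>  CDT;
typedef Delaunay_mesh_user_criteria_2<CDT>                  Criteria;

int main()
{
  CDT cdt;
  // Hypothetical domain: a square described by four constrained edges.
  CDT::Vertex_handle va = cdt.insert(CDT::Point(-1, -1));
  CDT::Vertex_handle vb = cdt.insert(CDT::Point( 1, -1));
  CDT::Vertex_handle vc = cdt.insert(CDT::Point( 1,  1));
  CDT::Vertex_handle vd = cdt.insert(CDT::Point(-1,  1));
  cdt.insert_constraint(va, vb);
  cdt.insert_constraint(vb, vc);
  cdt.insert_constraint(vc, vd);
  cdt.insert_constraint(vd, va);

  // Refine with the spatially varying criteria (0.125 is the usual angle bound).
  CGAL::refine_Delaunay_mesh_2(cdt, Criteria(0.125));

  std::cout << "Vertices after refinement: " << cdt.number_of_vertices() << std::endl;
  return 0;
}

As with CGAL's own Delaunay_mesh_size_criteria_2, refinement stops once no face is reported as bad, so user_sizing_function must stay strictly positive everywhere, as the comment in the class already warns.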
I am also interested in variable mesh criteria on the domain with CGAL. I found an alternative many years ago: https://www.cs.cmu.edu/~quake/triangle.html
But I am still interested in doing the same thing with CGAL ... I don't know if it is possible ...

apply bind pose to a kinect skeleton

I want to normalize a skeleton in order to make it invariant to the size of the person in front of the Kinect, in the same way as the Avateering example.
But I don't want to animate a 3D model using XNA; the only thing I need is to normalize a skeleton.
So I have divided this task into two functions:
(a) apply a bind pose to a skeleton, in order to see how these matrices work. Obviously this is not what I ultimately want to do, but it is a first step towards understanding how to work with the matrices, and so on.
(b) apply any arbitrary pose to a normalized-size skeleton.
First of all, I want to apply a bind pose to a skeleton (a).
First, I have to load the matrices that describe the bone lengths/offsets between bones and store them in List<Matrix> BindPose.
Since I had no idea how to do this, I modified the Avateering example and wrote to a file all the matrices that define the BindPose, InverseBindPose and SkeletonHierarchy of the dude model. I only need BindPose for this first task, but I have the code prepared for the second task (b).
The file looks like this:
1,331581E-06;-5,551115E-17;1;0;1;-4,16881E-11;-1,331581E-06;0;4,16881E-11;1;8,153579E-23;0;0,03756338;37,46099;2,230549;1
1,110223E-16;-4,435054E-22;1;0;1;1,426127E-06;-2,220446E-16;0;-1,426127E-06;1;-7,654181E-22;0;-0,9558675;-4,079016E-08;-6,266987E-12;1
0,9954988;-0,09477358;1,501821E-06;0;0,09477358;0,9954988;-4,019565E-06;0;-1,114112E-06;4,143805E-06;1;0;3,786007;-0,003599779;5,107028E-06;1
0,9948416;-0,101441;-3,23556E-07;0;0,101441;0,9948416;-2,266755E-08;0;3,241862E-07;-1,027114E-08;1;0;4,543321;-0,00359975;-1,33061E-07;1
0,9950595;0,09927933;2,388133E-07;0;-0,09927933;0,9950595;-2,333792E-08;0;-2,399506E-07;-4,86646E-10;1;0;4,544049;-0,003599948;6,324596E-08;1
0,9992647;0,02747673;0,02674458;0;-0,02928042;0,9971476;0,06956656;0;-0,02475683;-0,07029849;0,9972187;0;4,543965;-0,004398902;2,258555E-07;1
0,9154034;0,4025377;1,107153E-06;0;-0,4025377;0,9154033;-2,437432E-07;0;-1,109319E-06;-2,115673E-07;1;0;5,536249;-0,00288291;1,332601E-07;1
0,9812952;-0,1925096;-4,732622E-07;0;0,1925095;0,9812951;-3,00921E-08;0;4,697166E-07;-5,889972E-08;1;0;3,953898;1,702301E-07;4,88653E-08;1
.......
So each line is a 4X4 matrix defining the BindPose.
To generate this file, the code is like this:
private void ViewSkinningData(SkinningData data)
{
string nameFile = "bind_pose_transformations";
bool append = false;
// The using statement automatically closes the stream and calls IDisposable.Dispose on the stream object.
using (System.IO.StreamWriter file = new System.IO.StreamWriter(nameFile, append))
{
for (int i = 0; i < data.BindPose.Count; i++)
{
Matrix m = data.BindPose[i];
string matrixString = MatrixToString(m);
file.WriteLine(matrixString);
}
for (int i = 0; i < data.InverseBindPose.Count; i++)
{
Matrix m = data.InverseBindPose[i];
string matrixString = MatrixToString(m);
file.WriteLine(matrixString);
}
for (int i = 0; i < data.SkeletonHierarchy.Count; i++)
{
file.Write(data.SkeletonHierarchy[i] + ";");
}
}
}
string MatrixToString(Matrix m)
{
string result;
result = m.M11 + ";" + m.M12 + ";" + m.M13 + ";" + m.M14 + ";" + m.M21 + ";" + m.M22 + ";" + m.M23 + ";" + m.M24 + ";" + m.M31 + ";" + m.M32 + ";" + m.M33 + ";" + m.M34 + ";" + m.M41 + ";" + m.M42 + ";" + m.M43 + ";" + m.M44;
return result;
}
Next step is to load all this Skinning data in my program:
private void InitializeSkinningDataFromFile()
{
string filename = "bind_pose_transformations";
int number_avatar_joints = 58;
List<Matrix> binpose = new System.Collections.Generic.List<Matrix>();
List<Matrix> inversebindpose = new System.Collections.Generic.List<Matrix>();
List<int> skeletonhierarchy = new System.Collections.Generic.List<int>();
// The using statement automatically closes the stream and calls IDisposable.Dispose on the stream object.
using (System.IO.StreamReader file = new System.IO.StreamReader(filename))
{
string s;
int count = 0;
while (!String.IsNullOrEmpty(s = file.ReadLine()))
{
string[] values = s.Split(';');
Matrix m = BuildMatrix(values);
binpose.Add(m);
count++;
if (count == number_avatar_joints)
{
break;
}
}
count = 0;
while (!String.IsNullOrEmpty(s = file.ReadLine()))
{
string[] values = s.Split(';');
Matrix m = BuildMatrix(values);
inversebindpose.Add(m);
count++;
if (count == number_avatar_joints)
{
break;
}
}
string[] skeletonHierarchy = file.ReadLine().Split(';'); // reads a trailing separator character at the end...
//for (int i = 0; i < skeletonHierarchy.Count(); i++)
for (int i = 0; i < number_avatar_joints; i++)
{
skeletonhierarchy.Add(int.Parse(skeletonHierarchy[i]));
}
}
skinningDataValue = new SkinningData(binpose, inversebindpose, skeletonhierarchy);
}
Next, I have to construct the boneTransforms structure:
// Bone matrices for the "dude" model
this.boneTransforms = new Matrix[skinningDataValue.BindPose.Count];
this.skinningDataValue.BindPose.CopyTo(this.boneTransforms, 0);
Now boneTransforms holds the transformations for my skeleton, so I have to apply these transformations to a skeleton:
Skeleton skeleton = new Skeleton();
foreach (Joint joint in skeleton.Joints)
{
int indexMatrix = AvatarBoneToNuiJointIndex(joint.JointType);
Matrix transform;
if (indexMatrix >= 0)
{
transform = this.boneTransforms[indexMatrix];
}
else
{
transform = Matrix.Identity;
}
Joint aux = ApplyMatrixTransformationToJoint(joint, transform);
normalizeSkel.Joints[joint.JointType] = aux;
}
This is a helper function AvatarBoneToNuiJointIndex:
public int AvatarBoneToNuiJointIndex(JointType jointType)
{
switch (jointType)
{
case JointType.HipCenter:
return 1;
case JointType.Spine:
return 4;
case JointType.ShoulderCenter:
return 6;
case JointType.Head:
return 7;
case JointType.ShoulderLeft:
return 12;
case JointType.ElbowLeft:
return 13;
case JointType.WristLeft:
return 14;
case JointType.HandLeft:
return 15;
case JointType.ShoulderRight:
return 31;
case JointType.ElbowRight:
return 32;
case JointType.WristRight:
return 33;
case JointType.HandRight:
return 34;
case JointType.KneeLeft:
return 50;
case JointType.AnkleLeft:
return 51;
case JointType.FootLeft:
return 52;
case JointType.KneeRight:
return 54;
case JointType.AnkleRight:
return 55;
case JointType.FootRight:
return 56;
default: return -1;
}
}
This is a helper function ApplyMatrixTransformationToJoint:
public Joint ApplyMatrixTransformationToJoint(Joint skeletonJoint, Matrix transformations)
{
Vector3 pos = SkeletonPointToVector3(skeletonJoint.Position);
Vector3 result = ApplyMatrixTransformationToVector(pos, transformations);
SkeletonPoint newPosition = new SkeletonPoint()
{
X = result.X,
Y = result.Y,
Z = result.Z
};
skeletonJoint.Position = newPosition;
return skeletonJoint;
}
This is the code for ApplyMatrixTransformationToVector:
static Vector3 ApplyMatrixTransformationToVector(Vector3 v, Matrix m)
{
return Vector3.Transform(v, m);
}
But the problem is that I can't see anything.
I don't know if this approach is correct.
Any help would be fantastic.
Many thanks!

Omaha Hi Hand Evaluator

Currently I'm trying to port Keith Rule's Texas Holdem Hand Evaluator to Omaha Hi:
Texas Holdem Evaluator and Analysis
More Analysis Part1
More Analysis Part 2
After thinking more about the algorithm, I found a solution which gives me the right percentages for the hands, and everything is fine.
But it's really, really slow. How can I speed things up?
As the only thing I do right now is to look up normal five-card hands, a LUT might be right for me. Has anyone integrated one before?
static void Main(string[] args)
{
long count = 0;
double player1win = 0.0, player2win=0.0;
ulong player1 = Hand.ParseHand("Ad Kd As Ks");
ulong player2 = Hand.ParseHand("Th 5c 2c 7d");
foreach (ulong board in Hand.Hands(0, player1 | player2, 5))
{
uint maxplayer1value = 0, maxplayer2value = 0;
foreach (ulong boardcards in Hand.Hands(0, ulong.MaxValue ^ board, 3))
{
foreach (ulong player1hand in Hand.Hands(0Ul, ulong.MaxValue ^ player1, 2))
{
uint player1value = Hand.Evaluate(player1hand | boardcards, 5);
if (player1value > maxplayer1value) maxplayer1value = player1value;
}
}
foreach (ulong boardcards in Hand.Hands(0, ulong.MaxValue ^ board, 3))
{
foreach (ulong player2hand in Hand.Hands(0UL, ulong.MaxValue ^ player2, 2))
{
uint player2value = Hand.Evaluate(player2hand | boardcards, 5);
if (player2value > maxplayer2value) maxplayer2value = player2value;
}
}
if (maxplayer1value > maxplayer2value)
{
player1win += 1.0;
}
else if (maxplayer2value > maxplayer1value)
{
player2win += 1.0;
}
else
{
player1win += 0.5;
player2win += 0.5;
}
count++;
}
Console.WriteLine("Player1: {0:0.0000} Player2: {1:0.0000} Count: {2}", player1win / count * 100, player2win / count * 100, count);
Console.ReadLine();
}
Looks like you're trying to create an equity calculator. I've done this as well, but not for Omaha (Texas Hold'em instead). With ten players to evaluate, I get about ~200K hands per second, which gives an accurate enough result in no time. If there are only two players to evaluate, I can get up to 4 million evaluations per second.
I used bitmasks for hands: one 64-bit integer to represent a card, a hand, or the entire board. You actually only need 52 bits of it, obviously. By using bitwise operators, things move along rather quickly. Here's a quick sample from my project (in C++, though). It uses the 2 + 2 evaluator for fast look-ups:
while (trial < trials) {
/** I use here a linked list over the hand-distributions (players).
* This is kind of natural as well, as circle is the basic
* shape of poker.
*/
pDist = pFirstDist;
unsigned __int64 usedCards = _deadCards;
bool collision;
/** Here, we choose random distributions for the comparison.
* There is a chance, that two separate distributions has
* the same card being picked-up. In that case, we have a collision,
* so do the choosing again.
*/
do {
pDist->Choose(usedCards, collision);
/** If there is only one hand in the distribution (unary),
* there is no need to check over collision, since it's been
* already done in the phase building them (distributions).
*/
if (pDist->_isUnary)
collision = false;
pDist = pDist->_pNext;
} while (pDist != pFirstDist && !collision);
if (collision) {
/** Oops! Collision occurred! Take the next player (hand-
* distribution and do this all over again.
*
*/
pFirstDist = pDist->_pNext;
continue;
}
unsigned __int64 board = 0;
/** Pick a board from the hashed ones, until it's unique compared to
* the distributions.
*
*/
do {
if (count == 1) {
board = boards[0];
collision = false;
} else {
board = boards[Random()];
collision = (board & usedCards) != 0;
}
} while (collision);
board |= _boardCards;
int best = 0, s = 1;
do {
pDist->_currentHand |= board;
unsigned long i, l = static_cast<unsigned long>(pDist->_currentHand >> 32);
int p;
bool f = false;
/** My solution to find out the set bits.
* Since I'm working on a 32-bit environment, the "64-bit"
* variable needs to be split in to parts.
*/
if (_BitScanForward(&i, l)) {
p = _evaluator->_handRanks[53 + i + 32]; // Initial entry to the 2 + 2 evaluator hash.
l &= ~(static_cast<unsigned long>(1) << i);
f = true;
}
if (f)
while (_BitScanForward(&i, l)) {
l &= ~(static_cast<unsigned long>(1) << i);
p = _evaluator->_handRanks[p + i + 32];
}
l = static_cast<unsigned long>(pDist->_currentHand & 0xffffffff);
if (!f) {
_BitScanForward(&i, l);
p = _evaluator->_handRanks[53 + i];
l &= ~(static_cast<unsigned long>(1) << i);
}
while (_BitScanForward(&i, l)) {
l &= ~(static_cast<unsigned long>(1) << i);
p = _evaluator->_handRanks[p + i];
}
pDist->_rank = p;
/** Keep the statistics up. Please do remember, that
* equity consist of ties as well, so it's not a percentual
* chance of winning.
*/
if (p > best) {
pWinner = pDist;
s = 1;
best = p;
} else if (p == best)
++s;
pDist = pDist->_pNext;
} while (pDist != pFirstDist);
if (s > 1) {
for (unsigned int i = 0; i < _handDistributions.size(); ++i) if (_handDistributions[i]->_rank == best) { /* loop bound reconstructed; the original was lost in formatting */
_handDistributions[i]->_ties += 1.0f / s;
_handDistributions[i]->_equity += 1.0f / s;
}
} else {
++pWinner->_wins;
++pWinner->_equity;
}
++trial;
pFirstDist = pDist->_pNext;
}
Please refer to the 2 + 2 evaluator, which is quite easy to adapt to your own needs.
This might help:
An example of a ready-made Objective-C (and Java) Texas Hold'em 7- and 5-card evaluator can be found here and is further explained here. It "adds" up hands to generate an index that sufficiently characterises the hand for determining rank.
All feedback is welcome at the e-mail address found therein.