OpenKinect acquire raw depth image - Kinect

I am trying to use the example code from here.
I have made some changes in order to save the images to the computer. When I read the data in MATLAB, it seems like values that should be 0 are set to 2047, and the result does not seem correct when I reconstruct the 3D points using the default intrinsic camera parameters.
What I want to achieve is to save the images so that I can use
img = single(imread('depth.png')) / 1000
and have the depth values in meters, with pixels that have no measurement set to zero.
It is the Kinect V1 by the way.
Here is the code, with comments where I have tried to make changes.
#include "libfreenect.hpp"
#include <iostream>
#include <vector>
#include <cmath>
#include <pthread.h>
#include <cv.h>
#include <cxcore.h>
#include <highgui.h>
using namespace cv;
using namespace std;
class myMutex {
public:
myMutex() {
pthread_mutex_init( &m_mutex, NULL );
}
void lock() {
pthread_mutex_lock( &m_mutex );
}
void unlock() {
pthread_mutex_unlock( &m_mutex );
}
private:
pthread_mutex_t m_mutex;
};
// Should one use FREENECT_DEPTH_REGISTERED instead of FREENECT_DEPTH_11BIT?
class MyFreenectDevice : public Freenect::FreenectDevice {
public:
MyFreenectDevice(freenect_context *_ctx, int _index)
: Freenect::FreenectDevice(_ctx, _index), m_buffer_depth(FREENECT_DEPTH_11BIT),
m_buffer_rgb(FREENECT_VIDEO_RGB), m_gamma(2048), m_new_rgb_frame(false),
m_new_depth_frame(false), depthMat(Size(640,480),CV_16UC1),
rgbMat(Size(640,480), CV_8UC3, Scalar(0)),
ownMat(Size(640,480),CV_8UC3,Scalar(0)) {
for( unsigned int i = 0 ; i < 2048 ; i++) {
float v = i/2048.0;
v = std::pow(v, 3)* 6;
m_gamma[i] = v*6*256;
}
}
// Do not call directly even in child
void VideoCallback(void* _rgb, uint32_t timestamp) {
std::cout << "RGB callback" << std::endl;
m_rgb_mutex.lock();
uint8_t* rgb = static_cast<uint8_t*>(_rgb);
rgbMat.data = rgb;
m_new_rgb_frame = true;
m_rgb_mutex.unlock();
};
// Do not call directly even in child
void DepthCallback(void* _depth, uint32_t timestamp) {
std::cout << "Depth callback" << std::endl;
m_depth_mutex.lock();
uint16_t* depth = static_cast<uint16_t*>(_depth);
// Here I use memcpy instead so I can use uint16
// memcpy(depthMat.data,depth,depthMat.rows*depthMat.cols*sizeof(uint16_t));
depthMat.data = (uchar*) depth;
m_new_depth_frame = true;
m_depth_mutex.unlock();
}
bool getVideo(Mat& output) {
m_rgb_mutex.lock();
if(m_new_rgb_frame) {
cv::cvtColor(rgbMat, output, CV_RGB2BGR);
m_new_rgb_frame = false;
m_rgb_mutex.unlock();
return true;
} else {
m_rgb_mutex.unlock();
return false;
}
}
bool getDepth(Mat& output) {
m_depth_mutex.lock();
if(m_new_depth_frame) {
depthMat.copyTo(output);
m_new_depth_frame = false;
m_depth_mutex.unlock();
return true;
} else {
m_depth_mutex.unlock();
return false;
}
}
private:
// Should it be uint16_t instead or even higher?
std::vector<uint8_t> m_buffer_depth;
std::vector<uint8_t> m_buffer_rgb;
std::vector<uint16_t> m_gamma;
Mat depthMat;
Mat rgbMat;
Mat ownMat;
myMutex m_rgb_mutex;
myMutex m_depth_mutex;
bool m_new_rgb_frame;
bool m_new_depth_frame;
};
int main(int argc, char **argv) {
bool die(false);
string filename("snapshot");
string suffix(".png");
int i_snap(0),iter(0);
Mat depthMat(Size(640,480),CV_16UC1);
Mat depthf (Size(640,480),CV_8UC1);
Mat rgbMat(Size(640,480),CV_8UC3,Scalar(0));
Mat ownMat(Size(640,480),CV_8UC3,Scalar(0));
// The next two lines must be changed as Freenect::Freenect
// isn't a template but the method createDevice:
// Freenect::Freenect<MyFreenectDevice> freenect;
// MyFreenectDevice& device = freenect.createDevice(0);
// by these two lines:
Freenect::Freenect freenect;
MyFreenectDevice& device = freenect.createDevice<MyFreenectDevice>(0);
namedWindow("rgb",CV_WINDOW_AUTOSIZE);
namedWindow("depth",CV_WINDOW_AUTOSIZE);
device.startVideo();
device.startDepth();
while (!die) {
device.getVideo(rgbMat);
device.getDepth(depthMat);
// Here I save the depth images
std::ostringstream file;
file << filename << i_snap << suffix;
cv::imwrite(file.str(),depthMat);
cv::imshow("rgb", rgbMat);
depthMat.convertTo(depthf, CV_8UC1, 255.0/2048.0);
cv::imshow("depth",depthf);
if(iter >= 1000) break;
iter++;
}
device.stopVideo();
device.stopDepth();
return 0;
}
Thanks in advance!
Erik

I don't have any experience with OpenKinect in particular, but shouldn't your depth buffer be uint16?
std::vector<uint8_t> m_buffer_depth;
Also, for MATLAB, do check whether the image you are reading is uint16 or uint8. If it's the latter, convert it to uint16:
uint16(imread('depth.png'));
Sorry I couldn't help more. Hope this helps.
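To illustrate the suggestion, here is a minimal sketch of how the callback in the question could copy the depth data as 16-bit values instead of pointing depthMat.data at the driver's buffer (names are taken from the question's code; this is only a sketch, not tested against libfreenect):
// Sketch: copy the incoming 11-bit depth values into the CV_16UC1 depthMat
// rather than aliasing the driver-owned buffer.
void DepthCallback(void* _depth, uint32_t timestamp) {
    m_depth_mutex.lock();
    uint16_t* depth = static_cast<uint16_t*>(_depth);
    // depthMat is CV_16UC1, so each pixel is one uint16_t
    memcpy(depthMat.data, depth, depthMat.rows * depthMat.cols * sizeof(uint16_t));
    m_new_depth_frame = true;
    m_depth_mutex.unlock();
}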

The values you have are the raw depth values. You need to remap those into mm for the numbers to make sense. Kinect 1 can see up to 10 meters, so I would go with raw_values/2047*10000.
If the values are saturated at 2047, you are probably using the FREENECT_DEPTH_11BIT_PACKED depth format.
For work in MATLAB, it is always easier to use FREENECT_DEPTH_MM or FREENECT_DEPTH_REGISTERED.
Enjoy.
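For concreteness, a minimal sketch of how the depth format could be requested in the question's main(), assuming your libfreenect.hpp C++ wrapper exposes setDepthFormat() (check your version). In the millimetre modes, pixels with no measurement are 0, so the 16-bit PNG written from depthMat reads back in MATLAB as single(imread('depth.png'))/1000 metres:
// Sketch: request registered depth in millimetres before starting the stream.
device.setDepthFormat(FREENECT_DEPTH_REGISTERED);  // or FREENECT_DEPTH_MM
device.startVideo();
device.startDepth();
// depthMat is CV_16UC1, so imwrite() produces a 16-bit PNG whose pixel
// values are depth in mm (0 = no measurement).
cv::imwrite("depth.png", depthMat);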

Related

A problem when converting PLY to FBX

We are currently in the process of converting PLY or PCD data to FBX.
I did the conversion using the Assimp library as shown below, but found a problem with missing colors.
May I know what is wrong?
#include <assimp/Importer.hpp>
#include <assimp/scene.h>
#include <assimp/postprocess.h>
#include <assimp/Exporter.hpp>
#include <iostream>   // needed for std::cerr in the error check below
#include <System.h>
int main(int argc, char* argv[]) {
Assimp::Importer importer;
const aiScene* scene = importer.ReadFile("/app/test.ply",
aiProcess_Triangulate |
aiProcess_JoinIdenticalVertices |
aiProcess_SortByPType);
// Check that the import was successful
if (!scene) {
std::cerr << "Failed to import file: " << importer.GetErrorString() << std::endl;
return 1;
}
// Iterate over all the meshes in the scene
for (unsigned int i = 0; i < scene->mNumMeshes; ++i) {
aiMesh* mesh = scene->mMeshes[i];
// Check if the mesh has color data
if (mesh->HasVertexColors(0)) {
// Set the number of color components to 3 (for RGB)
//mesh->mNumColorComponents = 3;
// Allocate memory for the color data
mesh->mColors[0] = new aiColor4D[mesh->mNumVertices];
// Fill in the color data (for example, set all vertices to red)
for (unsigned int j = 0; j < mesh->mNumVertices; ++j) {
mesh->mColors[0][j].r = 1.0f;
mesh->mColors[0][j].g = 0.0f;
mesh->mColors[0][j].b = 0.0f;
mesh->mColors[0][j].a = 1.0f;
}
}
}
// Export the scene to fbx format
Assimp::Exporter exporter;
exporter.Export(scene, "fbx", "output.fbx");
return 0;
}

How to measure the execution time of the GPU using profiling + OpenCL + SYCL + DPCPP

I read this link
https://github.com/intel/pti-gpu
and I tried to use Device Activity Tracing for OpenCL(TM), but I am confused and do not know how I should measure the time on the accelerators using the Device Activity Tracing documentation.
For measuring the performance of the CPU I used chrono, but I am interested in using profiling to measure the performance of the CPU and GPU on different devices.
My program:
#include <CL/sycl.hpp>
#include <iostream>
#include <tbb/tbb.h>
#include <tbb/parallel_for.h>
#include <vector>
#include <string>
#include <queue>
#include<tbb/blocked_range.h>
#include <tbb/global_control.h>
#include <chrono>
using namespace tbb;
template<class Tin, class Tout, class Function>
class Map {
private:
Function fun;
public:
Map() {}
Map(Function f):fun(f) {}
std::vector<Tout> operator()(bool use_tbb, std::vector<Tin>& v) {
std::vector<Tout> r(v.size());
if(use_tbb){
// Start measuring time
auto begin = std::chrono::high_resolution_clock::now();
tbb::parallel_for(tbb::blocked_range<Tin>(0, v.size()),
[&](tbb::blocked_range<Tin> t) {
for (int index = t.begin(); index < t.end(); ++index){
r[index] = fun(v[index]);
}
});
// Stop measuring time and calculate the elapsed time
auto end = std::chrono::high_resolution_clock::now();
auto elapsed = std::chrono::duration_cast<std::chrono::nanoseconds>(end - begin);
printf("Time measured: %.3f seconds.\n", elapsed.count() * 1e-9);
return r;
} else {
sycl::queue gpuQueue{sycl::gpu_selector()};
sycl::range<1> n_item{v.size()};
sycl::buffer<Tin, 1> in_buffer(&v[0], n_item);
sycl::buffer<Tout, 1> out_buffer(&r[0], n_item);
gpuQueue.submit([&](sycl::handler& h){
//local copy of fun
auto f = fun;
sycl::accessor in_accessor(in_buffer, h, sycl::read_only);
sycl::accessor out_accessor(out_buffer, h, sycl::write_only);
h.parallel_for(n_item, [=](sycl::id<1> index) {
out_accessor[index] = f(in_accessor[index]);
});
}).wait();
}
return r;
}
};
template<class Tin, class Tout, class Function>
Map<Tin, Tout, Function> make_map(Function f) { return Map<Tin, Tout, Function>(f);}
typedef int(*func)(int x);
//define different functions
auto function = [](int x){ return x; };
auto functionTimesTwo = [](int x){ return (x*2); };
auto functionDivideByTwo = [](int x){ return (x/2); };
auto lambdaFunction = [](int x){return (++x);};
int main(int argc, char *argv[]) {
std::vector<int> v = {1,2,3,4,5,6,7,8,9};
//auto f = [](int x){return (++x);};
//Array of functions
func functions[] =
{
function,
functionTimesTwo,
functionDivideByTwo,
lambdaFunction
};
for(int i = 0; i< sizeof(functions); i++){
auto m1 = make_map<int, int>(functions[i]);
//auto m1 = make_map<int, int>(f);
std::vector<int> r = m1(true, v);
//print the result
for(auto &e:r) {
std::cout << e << " ";
}
}
return 0;
}
First of all, SYCL kernels won't support function pointers, so you will need to change the code accordingly.
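For illustration (a sketch only, reusing names from the question), one way to avoid function pointers is to pass each lambda to make_map directly, since a lambda is an ordinary copyable object that can be captured by value inside the kernel:
// Sketch: call make_map with the lambdas themselves rather than through the
// func (function pointer) array, so no indirect call occurs in the SYCL kernel.
auto m1 = make_map<int, int>(functionTimesTwo);
std::vector<int> r1 = m1(false, v);   // false -> take the SYCL/GPU path

auto m2 = make_map<int, int>(lambdaFunction);
std::vector<int> r2 = m2(false, v);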
One way to achieve profiling on the GPU is to follow the steps below:
1. Enable profiling mode for the command queue of the target device
2. Introduce the event for the target device activity
3. Set the callback to be notified when the activity is completed
4. Read the profiling data inside the callback
Basically, you need to use CL_PROFILING_COMMAND_START and CL_PROFILING_COMMAND_END (the times at which the command identified by the event started and ended execution on the device) inside the callback.
You can find the detailed steps here
https://github.com/intel/pti-gpu/blob/master/chapters/device_activity_tracing/OpenCL.md
I would also advise you to check the pti-gpu samples that use Device Activity Tracing. See the URL below:
https://github.com/intel/pti-gpu/tree/master/samples
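If you only need kernel timings and want to stay at the SYCL level rather than use the OpenCL-level activity tracing described above, here is a sketch using standard SYCL 2020 event profiling (variable names taken from the question's Map::operator(); assumes a DPC++/SYCL 2020 compiler):
// Sketch: construct the queue with profiling enabled and read the device
// timestamps from the event returned by submit().
sycl::queue gpuQueue{sycl::gpu_selector(),
                     sycl::property::queue::enable_profiling()};
sycl::event e = gpuQueue.submit([&](sycl::handler& h) {
    auto f = fun;   // local copy of the callable
    sycl::accessor in_accessor(in_buffer, h, sycl::read_only);
    sycl::accessor out_accessor(out_buffer, h, sycl::write_only);
    h.parallel_for(n_item, [=](sycl::id<1> index) {
        out_accessor[index] = f(in_accessor[index]);
    });
});
e.wait();
// Timestamps are in nanoseconds, measured on the device.
auto start = e.get_profiling_info<sycl::info::event_profiling::command_start>();
auto end = e.get_profiling_info<sycl::info::event_profiling::command_end>();
std::cout << "Kernel time: " << (end - start) * 1e-9 << " seconds\n";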

passing a class variable to API function

I want to track a global variable that I am passing into an API function. I found that one could do it using a class:
template <class T>
class MonitoredVariable
{
public:
MonitoredVariable() {}
MonitoredVariable(const T& value) : m_value(value) {}
//T operator T() const { return m_value; }
const MonitoredVariable& operator = (const T& value)
{
PlugIn::gResultOut << "value changed " << std::endl;
m_value = value;
return *this;
}
private:
T m_value;
};
The API function takes variables as
bool APIFunction(double time, bool *is_done, double *fraction_done);
The following gives me an error:
ImagePtr Im;
bool is_done;
MonitoredVariable<double*> fraction_done;
bool frameready = Im->APIFunction(2.1, is_done, fraction_done);
ERROR:
error C2664: cannot convert argument 3 from 'MonitoredVariable<double *>' to 'double *'
what would I have to change here?
thx!
I'm not really sure if this is what you want:
#include <iostream>
using namespace std;
template <class T>
class MonitoredVariable
{
public:
MonitoredVariable() {}
MonitoredVariable(const T& value) : m_value(value) {}
//T operator T() const { return m_value; }
const MonitoredVariable& operator = (const T& value)
{
//PlugIn::gResultOut << "value changed " << std::endl;
m_value = value;
return *this;
}
void printValue() {
std::cout << m_value;
}
T& getValue() {
return m_value;
}
private:
T m_value;
};
bool func(double firstDouble, bool *is_done, double* fraction_done) {
// do stuff
*fraction_done = firstDouble + (40.23 * 5);
*is_done = true;
return true;
}
int main()
{
bool is_done = true;
MonitoredVariable<double> fraction_done;
func(2.10, &is_done, &fraction_done.getValue());
fraction_done.printValue();
return 0;
}
So basically we have a class called MonitoredVariable which has a member variable called m_value. I'm not really sure why you wanted it to be a pointer, because we can also take the address of a normal double variable.
Perhaps the following makes it clearer what I want to achieve. I want to pass a class variable into an API function and monitor the variable in real time. This value goes from zero to 1 every 3 ms or so. I am trying to avoid a while loop and instead track the value within the class through the overloaded = operator.
#include "stdafx.h"
#include <iostream>
#include <thread>
#include <future>
using namespace std;
template <class T>
class MonitoredVariable
{
public:
MonitoredVariable() {}
MonitoredVariable(const T& value) : m_value(value) {}
void printValue() {
std::cout << m_value;
}
const MonitoredVariable& operator = (const T& value)
{
m_value = value.m_value;
if(m_value> 0.8) std::cout << m_value; // *THIS NEVER GETS PRINTED!!!*
return *this;
}
T& getValue() {
return m_value;
}
private:
T m_value;
};
bool func(bool *is_done, double* fraction_done) {
unsigned long c = 1;
while (*is_done)
{
*fraction_done = (double) 0.01*c;
this_thread::sleep_for(chrono::milliseconds(10));
c++;
if (*fraction_done >= 1) *is_done = false;
}
return true;
}
int main()
{
bool is_done = true;
MonitoredVariable<double> *MonitoredVariablePtr = new MonitoredVariable<double>();
std::future<bool> fu = std::async(std::launch::async,func, &is_done, &MonitoredVariablePtr->getValue());
// IF I UNCOMMENT THIS, IT PRINTS...
/*
while(is_done)
{
if(MonitoredVariablePtr->getValue() > 0.8) MonitoredVariablePtr->printValue();
}
*/
return 0;
}
Why does the if(m_value > 0.8) std::cout << m_value line within the class never get printed when the value is updated?
THX!

How to get camera device name in OpenCV?

There are two ways to open a camera with cv::VideoCapture:
CV_WRAP virtual bool open(const String& filename)
CV_WRAP virtual bool open(int index)
Is it possible to open the camera using the index and then get the filename (device name) from the VideoCapture object?
Or, how do I find the device name of a USB webcam in Windows to pass to the open function?
To my knowledge, OpenCV does not provide such functionality. To get the names of the attached devices, you'll need to go lower and use the DirectShow or WMF API to get the device enumeration list.
Here is a good answer which will help you do what you want, but not with OpenCV. Pasting the same code from that post.
#pragma once
#include <new>
#include <windows.h>
#include <mfapi.h>
#include <mfidl.h>
#include <mfreadwrite.h>
#include <Wmcodecdsp.h>
#include <assert.h>
#include <Dbt.h>
#include <shlwapi.h>
#include <mfplay.h>
#include <iostream>
const UINT WM_APP_PREVIEW_ERROR = WM_APP + 1; // wparam = HRESULT
class DeviceList
{
UINT32 m_cDevices; // contains the number of devices
IMFActivate **m_ppDevices; // contains properties about each device
public:
DeviceList() : m_ppDevices(NULL), m_cDevices(0)
{
MFStartup(MF_VERSION);
}
~DeviceList()
{
Clear();
}
UINT32 Count() const { return m_cDevices; }
void Clear();
HRESULT EnumerateDevices();
HRESULT GetDevice(UINT32 index, IMFActivate **ppActivate);
HRESULT GetDeviceName(UINT32 index, WCHAR **ppszName);
};
#include "DeviceList.h"
/*
* A templated function SafeRelease that releases a COM pointer and sets it to NULL
* #param ppT the pointer to release
*/
template <class T> void SafeRelease(T **ppT)
{
if (*ppT)
{
(*ppT)->Release();
*ppT = NULL;
}
}
/*
* A function which copies an attribute from a source to a destination
* # param pSrc is an interface that stores key/value pairs of an object
* # param pDest is an interface that stores key/value pairs of an object
* # param GUID is a unique identifier
* # return HRESULT error/warning condition on Windows
*/
HRESULT CopyAttribute(IMFAttributes *pSrc, IMFAttributes *pDest, const GUID& key);
/*
* A method of DeviceList which clears the list of devices
*/
void DeviceList::Clear()
{
for (UINT32 i = 0; i < m_cDevices; i++)
{
SafeRelease(&m_ppDevices[i]);
}
CoTaskMemFree(m_ppDevices);
m_ppDevices = NULL;
m_cDevices = 0;
}
/*
* A function which enumerates the list of devices.
* # return HRESULT error/warning condition on Windows
*/
HRESULT DeviceList::EnumerateDevices()
{
HRESULT hr = S_OK;
IMFAttributes *pAttributes = NULL;
this->Clear();
// Initialize an attribute store. We will use this to
// specify the enumeration parameters.
std::cout << "Enumerate devices" << std::endl;
hr = MFCreateAttributes(&pAttributes, 1);
// Ask for source type = video capture devices
if (SUCCEEDED(hr))
{
std::cout << "Enumerate devices" << std::endl;
hr = pAttributes->SetGUID(
MF_DEVSOURCE_ATTRIBUTE_SOURCE_TYPE,
MF_DEVSOURCE_ATTRIBUTE_SOURCE_TYPE_VIDCAP_GUID
);
}
// Enumerate devices.
if (SUCCEEDED(hr))
{
std::cout << "Enumerate devices:" << m_cDevices << std::endl;
hr = MFEnumDeviceSources(pAttributes, &m_ppDevices, &m_cDevices);
}
SafeRelease(&pAttributes);
return hr;
}
/*
* A function which retrieves the device at the given index
* # param index the index in the device array
* # param ppActivate receives the IMFActivate pointer for the device
* # return HRESULT error/warning condition on Windows
*/
HRESULT DeviceList::GetDevice(UINT32 index, IMFActivate **ppActivate)
{
if (index >= Count())
{
return E_INVALIDARG;
}
*ppActivate = m_ppDevices[index];
(*ppActivate)->AddRef();
return S_OK;
}
/*
* A function which gets the friendly name of a device
* # param index the index in the device array
* # param ppszName receives the name of the device
*/
HRESULT DeviceList::GetDeviceName(UINT32 index, WCHAR **ppszName)
{
std::cout << "Get Device name" << std::endl;
if (index >= Count())
{
return E_INVALIDARG;
}
HRESULT hr = S_OK;
hr = m_ppDevices[index]->GetAllocatedString(
MF_DEVSOURCE_ATTRIBUTE_FRIENDLY_NAME,
ppszName,
NULL
);
return hr;
}
#include <iostream>
#include "DeviceList.h"
HRESULT UpdateDeviceList()
{
HRESULT hr = S_OK;
WCHAR *szFriendlyName = NULL;
DeviceList g_devices;
g_devices.Clear();
hr = g_devices.EnumerateDevices();
if (FAILED(hr)) { goto done; }
std::cout << "Nb devices found:"<< g_devices.Count() << std::endl;
for (UINT32 iDevice = 0; iDevice < g_devices.Count(); iDevice++)
{
//std::cout << "" << std::endl;
hr = g_devices.GetDeviceName(iDevice, &szFriendlyName);
if (FAILED(hr)) { goto done; }
std::cout << szFriendlyName << std::endl;
// The list might be sorted, so the list index is not always the same as the
// array index. Therefore, set the array index as item data.
CoTaskMemFree(szFriendlyName);
szFriendlyName = NULL;
}
std::cout << "End of EnumDeviceList" << std::endl;
done:
return hr;
}
int main()
{
std::cout <<"Main" << std::endl;
UpdateDeviceList();
while (1);
return 0;
}
This is closer to what you want (note that all __STRING__ types are std::string):
int _GetUSBCameraDevicesList(std::vector<__STRING__>& list, std::vector<__STRING__>& devicePaths)
{
//COM Library Initialization
//comInit();
ICreateDevEnum* pDevEnum = NULL;
IEnumMoniker* pEnum = NULL;
int deviceCounter = 0;
CoInitialize(NULL);
HRESULT hr = CoCreateInstance(CLSID_SystemDeviceEnum, NULL,
CLSCTX_INPROC_SERVER, IID_ICreateDevEnum,
reinterpret_cast<void**>(&pDevEnum));
if (SUCCEEDED(hr))
{
// Create an enumerator for the video capture category.
hr = pDevEnum->CreateClassEnumerator(
CLSID_VideoInputDeviceCategory,
&pEnum, 0);
if (hr == S_OK) {
printf("SETUP: Looking For Capture Devices\n");
IMoniker* pMoniker = NULL;
while (pEnum->Next(1, &pMoniker, NULL) == S_OK) {
IPropertyBag* pPropBag;
hr = pMoniker->BindToStorage(0, 0, IID_IPropertyBag,
(void**)(&pPropBag));
if (FAILED(hr)) {
pMoniker->Release();
continue; // Skip this one, maybe the next one will work.
}
// Find the description or friendly name.
VARIANT varName;
VariantInit(&varName);
hr = pPropBag->Read(L"Description", &varName, 0);
if (FAILED(hr)) hr = pPropBag->Read(L"FriendlyName", &varName, 0);
if (SUCCEEDED(hr))
{
hr = pPropBag->Read(L"FriendlyName", &varName, 0);
int count = 0;
char tmp[255] = { 0 };
//int maxLen = sizeof(deviceNames[0]) / sizeof(deviceNames[0][0]) - 2;
while (varName.bstrVal[count] != 0x00 && count < 255)
{
tmp[count] = (char)varName.bstrVal[count];
count++;
}
list.emplace_back(tmp);
//deviceNames[deviceCounter][count] = 0;
//if (!silent) DebugPrintOut("SETUP: %i) %s\n", deviceCounter, deviceNames[deviceCounter]);
// then read Device Path
{
VARIANT DP_Path;
VariantInit(&DP_Path);
hr = pPropBag->Read(L"DevicePath", &DP_Path, 0);
if (SUCCEEDED(hr))
{
int __count = 0;
char __tmp[255] = { 0 };
while (DP_Path.bstrVal[__count] != 0x00 && __count < 255)
{
__tmp[__count] = (char)DP_Path.bstrVal[__count];
__count++;
}
devicePaths.emplace_back(__tmp);
}
}
}
pPropBag->Release();
pPropBag = NULL;
pMoniker->Release();
pMoniker = NULL;
deviceCounter++;
}
pDevEnum->Release();
pDevEnum = NULL;
pEnum->Release();
pEnum = NULL;
}
//if (!silent) DebugPrintOut("SETUP: %i Device(s) found\n\n", deviceCounter);
}
//comUnInit();
return deviceCounter;
}
Just pass two std::vector<std::string> objects to get the list of all connected devices and their paths (the paths can be used to tell devices apart if two devices have the same friendly name).
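A rough usage sketch (assuming the DirectShow enumeration order matches the index that OpenCV's DirectShow backend uses, which is common but not guaranteed):
#include <iostream>
#include <string>
#include <vector>
#include <opencv2/opencv.hpp>

int main() {
    // Sketch: list device names/paths, then open a device by index with OpenCV.
    std::vector<std::string> names, paths;
    int count = _GetUSBCameraDevicesList(names, paths);   // from the answer above
    for (int i = 0; i < count; i++)
        std::cout << i << ": " << names[i] << " (" << paths[i] << ")" << std::endl;
    // With a recent OpenCV the DirectShow backend can be forced explicitly, so the
    // index is more likely to line up with the enumeration order above.
    cv::VideoCapture cap(0, cv::CAP_DSHOW);
    return cap.isOpened() ? 0 : 1;
}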

Controlling the volume of other applications

I am trying to make an app that controls the volume of another process using the Windows 7 Audio API.
What I'm looking for is the ISimpleAudioVolume for the session used by the other process.
I have tried using the IAudioSessionEnumerator but it will only give me the IAudioSessionControl2 of the session. Using the IAudioSessionControl I have managed to receive notifications when I change the volume through sndvol but not change it myself.
I have also tried using GetSimpleAudioVolume() from IAudioSessionManager but it will only give me sessions within the current process.
How do you do it? It should be possible since sndvol is doing this.
Here is an example of muting another process using Core Audio API.
#include <windows.h>
#include <iostream>
#include <mmdeviceapi.h>
#include <endpointvolume.h>
#include <Audiopolicy.h>
#include <comdef.h>
#include <comip.h>
#define CHECK_HR(hr) \
if(FAILED(hr)) { \
std::cout << "error" << std::endl; \
return 0; \
}
_COM_SMARTPTR_TYPEDEF(IMMDevice, __uuidof(IMMDevice));
_COM_SMARTPTR_TYPEDEF(IMMDeviceEnumerator, __uuidof(IMMDeviceEnumerator));
_COM_SMARTPTR_TYPEDEF(IAudioSessionManager2, __uuidof(IAudioSessionManager2));
_COM_SMARTPTR_TYPEDEF(IAudioSessionEnumerator, __uuidof(IAudioSessionEnumerator));
_COM_SMARTPTR_TYPEDEF(IAudioSessionControl2, __uuidof(IAudioSessionControl2));
_COM_SMARTPTR_TYPEDEF(IAudioSessionControl, __uuidof(IAudioSessionControl));
_COM_SMARTPTR_TYPEDEF(ISimpleAudioVolume, __uuidof(ISimpleAudioVolume));
IAudioSessionManager2Ptr CreateSessionManager()
{
HRESULT hr = S_OK;
IMMDevicePtr pDevice;
IMMDeviceEnumeratorPtr pEnumerator;
IAudioSessionManager2Ptr pSessionManager;
// Create the device enumerator.
CHECK_HR(hr = CoCreateInstance(
__uuidof(MMDeviceEnumerator),
NULL, CLSCTX_ALL,
__uuidof(IMMDeviceEnumerator),
(void**)&pEnumerator));
// Get the default audio device.
CHECK_HR(hr = pEnumerator->GetDefaultAudioEndpoint(
eRender, eConsole, &pDevice));
// Get the session manager.
CHECK_HR(hr = pDevice->Activate(
__uuidof(IAudioSessionManager2), CLSCTX_ALL,
NULL, (void**)&pSessionManager));
return pSessionManager;
}
bool MuteProcess(DWORD processId) {
IAudioSessionManager2Ptr mgr = CreateSessionManager();
if (!mgr) {
return false;
}
IAudioSessionEnumeratorPtr enumerator;
if (SUCCEEDED(mgr->GetSessionEnumerator(&enumerator))) {
int sessionCount;
if (SUCCEEDED(enumerator->GetCount(&sessionCount))) {
for (int i = 0; i < sessionCount; i++) {
IAudioSessionControlPtr control;
if (SUCCEEDED(enumerator->GetSession(i, &control))) {
IAudioSessionControl2Ptr control2;
if (SUCCEEDED(control->QueryInterface(__uuidof(IAudioSessionControl2), (void**)&control2))) {
DWORD foundProcessId;
if (SUCCEEDED(control2->GetProcessId(&foundProcessId))) {
if (foundProcessId == processId) {
ISimpleAudioVolumePtr volume;
if (SUCCEEDED(control2->QueryInterface(__uuidof(ISimpleAudioVolume), (void**)&volume))) {
if (SUCCEEDED(volume->SetMute(TRUE, 0))) {
return true;
}
}
}
}
}
}
}
}
}
return false;
}
int _tmain(int argc, _TCHAR* argv[]){
CoInitialize(NULL);
DWORD processId = 11944;
MuteProcess(processId);
return 0;
}
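Since the question asks about changing the volume and not only muting, note that the same ISimpleAudioVolume interface also exposes SetMasterVolume. A minimal sketch of what the inner block above could do instead of SetMute:
// Sketch: set the session volume to 50% (SetMasterVolume takes a level in 0.0 - 1.0).
if (SUCCEEDED(volume->SetMasterVolume(0.5f, NULL))) {
    return true;
}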
There is an MSDN forum thread and a blog post about this very question. Hope this helps.
According to Larry Osterman
"There is no publicly documented mechanism for doing what you're trying to do."