I have an ASP.NET Core application that will serve as a RabbitMQ producer. I have read the tutorials and guides regarding the RabbitMQ .NET client, and I still do not know how to deal with channel lifetime and concurrent access.
From what I have read, I understood the following:
IConnection is thread-safe, but costly to create
IModel is not thread-safe, but lightweight to create
For the IConnection, I would initialize it in Startup and inject it as a singleton (service).
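Something like this in ConfigureServices (just a sketch of what I mean; the host name is a placeholder):
// requires: using RabbitMQ.Client;
services.AddSingleton<IConnection>(sp =>
{
    var factory = new ConnectionFactory { HostName = "localhost" }; // placeholder host
    return factory.CreateConnection();
});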
However, I do not know how to deal with IModel management. Let's say I have a couple of services that use it; is it scalable to just do:
Solution 1
public void Publish(IConnection connection)
{
    // open a channel per publish and dispose it right away
    using (IModel model = connection.CreateModel())
    {
        model.BasicPublish(...);
    }
}
Solution 2
From what I have read, I understood that this is not really scalable.
So another solution would be to create a separate service containing a loop and a concurrent queue; all other services would dispatch messages to it.
This service would be the sole publisher to RabbitMQ.
Publisher
public class Publisher
{
    private readonly CancellationTokenSource cts = new CancellationTokenSource();
    private readonly BlockingCollection<byte[]> messages = new BlockingCollection<byte[]>();
    private IModel channel;
    private readonly string ExchangeName;
    private Task loopTask;

    public void Run()
    {
        this.loopTask = Task.Run(() => Loop(cts.Token), cts.Token);
    }

    private void Loop(CancellationToken token)
    {
        while (true)
        {
            token.ThrowIfCancellationRequested();
            // blocks until a message is available (or the token is cancelled)
            byte[] data = this.messages.Take(token);
            channel.BasicPublish(..., body: data);
        }
    }

    public void Publish(byte[] message)
    {
        this.messages.Add(message);
    }
}
Usage
public class SomeDIService
{
    private readonly Publisher publisher;

    public SomeDIService(Publisher publisher)
    {
        this.publisher = publisher;
    }

    public void DoSomething(byte[] data)
    {
        //do something...
        this.publisher.Publish(data);
    }
}
I would prefer solution 1, but I do not know the performance penalty, while I do not like solution 2 since I wanted to just publish messages directly to RabbitMQ; now I have to deal with this long-running Task too.
Is there any other solution? Am I missing something? Is there a simpler way?
Update
I mentioned concurrent access. I meant that I need a way to publish messages from multiple endpoints (services) to RabbitMQ.
Real scenario
public class Controller1:Controller
{
private SomeDIService service; //uses Publisher
[HttpGet]
public void Endpoint1()
{
this.service.DoSomething();
}
[HttpPost]
public void Endpoint2()
{
this.service.DoSomething();
}
}
public class Controller2:Controller
{
private SomeDIService service;
[HttpGet]
public void Endpoint3()
{
this.service.DoSomething();
}
[HttpPost]
public void Endpoint4()
{
this.service.DoSomething();
}
}
After searching for a long time, I found this solution and it works very well for me:
using Microsoft.Extensions.Options;
using RabbitMQ.Client;
using System;
using System.Collections.Generic;
using System.Text;
using System.Text.Json;
using System.Threading.Tasks;
namespace BSG.MessageBroker.RabbitMQ
{
public class Rabbit : IRabbit
{
private readonly EnvConfigModel EnvConfig;
private readonly string _hostname;
private readonly string _password;
private readonly string _exchangeName;
private readonly string _username;
private IConnection _connection;
private IModel _Model;
public Rabbit(IOptions<EnvConfigModel> appSettings)
{
EnvConfig = appSettings.Value;
_exchangeName = EnvConfig.Rabbit_ExchangeName;
_hostname = EnvConfig.Rabbit_Host;
_username = EnvConfig.Rabbit_Username;
_password = EnvConfig.Rabbit_Password;
CreateConnection();
_Model = _connection.CreateModel();
}
private void CreateConnection()
{
try
{
var factory = new ConnectionFactory
{
HostName = _hostname,
UserName = _username,
Password = _password,
AutomaticRecoveryEnabled = true,
TopologyRecoveryEnabled = true,
NetworkRecoveryInterval = TimeSpan.FromSeconds(3)
};
_connection = factory.CreateConnection();
}
catch (Exception ex)
{
Console.WriteLine($"Could not create connection: {ex.Message}");
}
}
private bool ConnectionExists()
{
if (_connection != null)
{
return true;
}
CreateConnection();
return _connection != null;
}
public bool PushToQueue(string Message)
{
    try
    {
        if (ConnectionExists())
        {
            byte[] body = Encoding.UTF8.GetBytes(JsonSerializer.Serialize(Message));
            _Model.BasicPublish(exchange: _exchangeName,
                                routingKey: "1001",
                                basicProperties: null,
                                body: body);
            return true;
        }
        return false;
    }
    catch (Exception)
    {
        return false;
    }
}
}
}
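I register it as a singleton so the connection and channel live for the whole application. The registration below is just a sketch; the configuration section name is only an example:
// Startup.ConfigureServices - sketch
services.Configure<EnvConfigModel>(Configuration.GetSection("Rabbit")); // example section name
services.AddSingleton<IRabbit, Rabbit>();
Note that because a single IModel is shared this way, concurrent publishes should be serialized (for example with a lock around BasicPublish), since IModel is not thread-safe, as mentioned above.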
Let's say I have several ASP.NET BackgroundServices and each is logging to its own scope/operation (OP1 and OP2).
public class MyBackgroundService1 : BackgroundService
{
private readonly ILogger<MyBackgroundService1> _logger;
public MyBackgroundService1(ILogger<MyBackgroundService1> logger)
{
_logger = logger;
}
protected override async Task ExecuteAsync(CancellationToken stoppingToken)
{
var activity = new Activity("OP1");
activity.Start();
while (!stoppingToken.IsCancellationRequested)
{
_logger.LogInformation("Hello from MyBackgroundService1");
await Task.Delay(5000, stoppingToken);
}
}
}
public class MyBackgroundService2 : BackgroundService
{
private readonly ILogger<MyBackgroundService2> _logger;
public MyBackgroundService2(ILogger<MyBackgroundService2> logger)
{
_logger = logger;
}
protected override async Task ExecuteAsync(CancellationToken stoppingToken)
{
var activity = new Activity("OP2");
activity.Start();
while (!stoppingToken.IsCancellationRequested)
{
_logger.LogInformation("Hello from MyBackgroundService2");
await Task.Delay(1000, stoppingToken);
}
}
}
Now I would like to use Blazor and want to display a table per operation with all corresponding logs.
Example output
OP1 Logs:
Hello from MyBackgroundService1
Hello from MyBackgroundService1
OP2 Logs:
Hello from MyBackgroundService2
Hello from MyBackgroundService2
How would I do that?
For this purpose, you need to create a log provider that stores the information in the database and then retrieves the information from the log table.
First, create a class to store logs in the database as follows:
public class DBLog
{
public int DBLogId { get; set; }
public string? LogLevel { get; set; }
public string? EventName { get; set; }
public string? Message { get; set; }
public string? StackTrace { get; set; }
public DateTime CreatedDate { get; set; }=DateTime.Now;
}
Now we need to create a custom DBLogger. The DBLogger class implements the ILogger interface and has three methods, the most important of which is Log, which is called every time the logger is used in the program. To read more about the other two methods, you can refer here.
public class DBLogger:ILogger
{
private readonly LogLevel _minLevel;
private readonly DbLoggerProvider _loggerProvider;
private readonly string _categoryName;
public DBLogger(
DbLoggerProvider loggerProvider,
string categoryName
)
{
_loggerProvider= loggerProvider ?? throw new ArgumentNullException(nameof(loggerProvider));
_categoryName= categoryName;
}
public IDisposable BeginScope<TState>(TState state)
{
return new NoopDisposable();
}
public bool IsEnabled(LogLevel logLevel)
{
return logLevel >= _minLevel;
}
public void Log<TState>(
LogLevel logLevel,
EventId eventId,
TState state,
Exception exception,
Func<TState, Exception, string> formatter)
{
if (!IsEnabled(logLevel))
{
return;
}
if (formatter == null)
{
throw new ArgumentNullException(nameof(formatter));
}
var message = formatter(state, exception);
if (exception != null)
{
message = $"{message}{Environment.NewLine}{exception}";
}
if (string.IsNullOrEmpty(message))
{
return;
}
var dblLogItem = new DBLog()
{
EventName = eventId.Name,
LogLevel = logLevel.ToString(),
Message = $"{_categoryName}{Environment.NewLine}{message}",
StackTrace=exception?.StackTrace
};
_loggerProvider.AddLogItem(dblLogItem);
}
private class NoopDisposable : IDisposable
{
    public void Dispose()
    {
        // nothing to clean up for a no-op scope
    }
}
}
Now we need to create a custom log provider so that an instance of the above custom database logger (DBLogger) can be created.
public class DbLoggerProvider : ILoggerProvider
{
private readonly CancellationTokenSource _cancellationTokenSource = new();
private readonly IList<DBLog> _currentBatch = new List<DBLog>();
private readonly TimeSpan _interval = TimeSpan.FromSeconds(2);
private readonly BlockingCollection<DBLog> _messageQueue = new(new ConcurrentQueue<DBLog>());
private readonly Task _outputTask;
private readonly IServiceProvider _serviceProvider;
private bool _isDisposed;
public DbLoggerProvider(IServiceProvider serviceProvider)
{
_serviceProvider = serviceProvider ?? throw new ArgumentNullException(nameof(serviceProvider));
_outputTask = Task.Run(ProcessLogQueue);
}
public ILogger CreateLogger(string categoryName)
{
return new DBLogger(this, categoryName);
}
private async Task ProcessLogQueue()
{
while (!_cancellationTokenSource.IsCancellationRequested)
{
while (_messageQueue.TryTake(out var message))
{
try
{
_currentBatch.Add(message);
}
catch
{
//cancellation token canceled or CompleteAdding called
}
}
await SaveLogItemsAsync(_currentBatch, _cancellationTokenSource.Token);
_currentBatch.Clear();
await Task.Delay(_interval, _cancellationTokenSource.Token);
}
}
internal void AddLogItem(DBLog appLogItem)
{
if (!_messageQueue.IsAddingCompleted)
{
_messageQueue.Add(appLogItem, _cancellationTokenSource.Token);
}
}
private async Task SaveLogItemsAsync(IList<DBLog> items, CancellationToken cancellationToken)
{
try
{
if (!items.Any())
{
return;
}
// We need a separate context for the logger to call its SaveChanges several times,
// without using the current request's context and changing its internal state.
var scopeFactory = _serviceProvider.GetRequiredService<IServiceScopeFactory>();
using (var scope = scopeFactory.CreateScope())
{
var scopedProvider = scope.ServiceProvider;
using (var newDbContext = scopedProvider.GetRequiredService<ApplicationDbContext>())
{
foreach (var item in items)
{
var addedEntry = newDbContext.DbLogs.Add(item);
}
await newDbContext.SaveChangesAsync(cancellationToken);
// ...
}
}
}
catch
{
// don't throw exceptions from logger
}
}
[SuppressMessage("Microsoft.Usage", "CA1031:catch a more specific allowed exception type, or rethrow the exception",
Justification = "don't throw exceptions from logger")]
private void Stop()
{
_cancellationTokenSource.Cancel();
_messageQueue.CompleteAdding();
try
{
_outputTask.Wait(_interval);
}
catch
{
// don't throw exceptions from logger
}
}
public void Dispose()
{
Dispose(true);
GC.SuppressFinalize(this);
}
protected virtual void Dispose(bool disposing)
{
if (!_isDisposed)
{
try
{
if (disposing)
{
Stop();
_messageQueue.Dispose();
_cancellationTokenSource.Dispose();
}
}
finally
{
_isDisposed = true;
}
}
}
}
Finally, it is enough to register this custom log provider (DbLoggerProvider) in Startup.cs or Program.cs:
// pass the root service provider; DbLoggerProvider creates its own scopes internally
loggerFactory.AddProvider(new DbLoggerProvider(app.ApplicationServices));
From now on, every time we call _logger.LogInformation(...), the log information will also be stored in the database.
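If you are on the minimal hosting model (Program.cs only), the registration could look roughly like this; it is just a sketch, relying on the fact that the logging infrastructure picks up any ILoggerProvider registered in DI:
// Program.cs (minimal hosting) - sketch
var builder = WebApplication.CreateBuilder(args);
builder.Services.AddDbContext<ApplicationDbContext>(); // configured elsewhere
// any ILoggerProvider registered in DI is used by the default LoggerFactory:
builder.Services.AddSingleton<ILoggerProvider, DbLoggerProvider>();
var app = builder.Build();
app.Run();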
Note: Because the number of calls to record logs in the database may be high, a concurrent queue is used to store logs.
If you like, you can refer to my repository that implements the same method.
In order to log the areas separately (scope/operation), you can create several different DBLoggers that store the information in different tables; a lighter alternative is sketched below.
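As a lighter alternative (my own sketch, not part of the provider above), you could add a hypothetical Operation column to DBLog and fill it from Activity.Current inside DBLogger.Log, then filter by that column when building the per-operation tables in Blazor:
// inside DBLogger.Log<TState>(...) - sketch; assumes DBLog gets a new string? Operation property
var dbLogItem = new DBLog
{
    EventName = eventId.Name,
    LogLevel = logLevel.ToString(),
    Message = $"{_categoryName}{Environment.NewLine}{message}",
    StackTrace = exception?.StackTrace,
    Operation = System.Diagnostics.Activity.Current?.OperationName // "OP1" / "OP2" from the background services
};
_loggerProvider.AddLogItem(dbLogItem);
A Blazor component could then query DbLogs with Where(l => l.Operation == "OP1") for the first table and "OP2" for the second.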
I'm using Azure Redis Cache for development and wanted to verify the way I'm handling the exceptions. According to the best practices, it's possible to face RedisConnectionExceptions, and to resolve this we have to dispose the old ConnectionMultiplexer and create a new one. If abortConnect is set to false, then the multiplexer will silently retry connecting without throwing the error. So if the exception is thrown, it will only be after several attempts to reconnect have failed. Is my understanding of this correct?
This is my connection string -
cachename.redis.cache.windows.net:6380,password=Password,ssl=True,abortConnect=False
I believe the connection exception will only occur when you try to call GetConnection() on the multiplexer. Find my code below -
static Lazy<ConnectionMultiplexer> multiplexer = CreateMultiplexer();
public static ConnectionMultiplexer GetConnection() => multiplexer.Value;
private static Lazy<ConnectionMultiplexer> CreateMultiplexer()
{
return new Lazy<ConnectionMultiplexer>(() => ConnectionMultiplexer.Connect(connectionString));
}
private static void CloseMultiplexer(Lazy<ConnectionMultiplexer> oldMultiplexer)
{
if (oldMultiplexer != null)
{
oldMultiplexer.Value.Close();
}
}
public static void Reconnect()
{
var oldMultiplexer = multiplexer;
CloseMultiplexer(multiplexer);
multiplexer = CreateMultiplexer();
}
And I'm consuming this in another class -
public class RedisCacheManager
{
private static IDatabase _cache;
private TimeSpan expiry = new TimeSpan(hours: 6, minutes: 0, seconds: 0);
public RedisCacheManager()
{
try
{
_cache = RedisCacheHelper.GetConnection().GetDatabase();
}
catch(RedisConnectionException)
{
RedisCacheHelper.Reconnect();
new RedisCacheManager();
}
}
public async Task<RedisValue[]> GetFromCacheAsync(List<string> keys)
{
var cacheValues = await _cache.StringGetAsync(keys.Select(k => (RedisKey)k).ToArray());
return cacheValues;
}
public async Task SaveInCacheAsync<TValue>(Dictionary<string, TValue> kvps)
{
var tasks = new List<Task>();
foreach(var kvp in kvps)
{
tasks.Add(_cache.StringSetAsync(kvp.Key, JsonConvert.SerializeObject(kvp), expiry));
}
await Task.WhenAll(tasks);
}
}
I'm not sure if calling the constructor in the catch block is a good practice. And are there any other exceptions that I would need to handle while calling StringGetAsync and StringSetAsync?
The CacheManager can look like this:
using Newtonsoft.Json;
using StackExchange.Redis;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Threading.Tasks;
public sealed class RedisCacheManager : IDisposable
{
private readonly TimeSpan _expiry;
private readonly Lazy<ConnectionMultiplexer> _lazyConnection;
private ConnectionMultiplexer Connection { get => _lazyConnection.Value; }
public RedisCacheManager(string connectionString, TimeSpan expiry)
{
_expiry = expiry;
_lazyConnection = new Lazy<ConnectionMultiplexer>(() => ConnectionMultiplexer.Connect(connectionString));
}
public async Task<RedisValue[]> GetFromCacheAsync(IEnumerable<string> keys)
{
var cacheValues = await Connection.GetDatabase()
.StringGetAsync(keys.Select(key => (RedisKey)key).ToArray()).ConfigureAwait(false);
return cacheValues;
}
public async Task SaveInCacheAsync<TValue>(Dictionary<string, TValue> kvps)
{
var tasks = kvps
.Select(kvp => Connection.GetDatabase().StringSetAsync(kvp.Key, JsonConvert.SerializeObject(kvp), _expiry))
.ToArray();
await Task.WhenAll(tasks).ConfigureAwait(false);
}
public void Dispose()
{
if (_lazyConnection.IsValueCreated)
{
_lazyConnection.Value.Dispose();
}
}
}
Using:
public readonly static RedisCacheManager RedisCacheManager = new RedisCacheManager("connection string", TimeSpan.FromHours(6));
Remarks:
it is intended that abortConnect=false (which means that the call succeeds even if a connection to Azure Cache for Redis is not established), so no Redis exceptions should be thrown from the constructor
The object returned from GetDatabase is a cheap pass-through object and does not need to be stored.
The GetFromCacheAsync / SaveInCacheAsync methods can throw exceptions to the caller, and that is OK. You can apply a retry policy to resolve transient faults (see the sketch after this list).
If you have an IoC container, it should create RedisCacheManager with a single-instance scope (for example, an Autofac registration).
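For example, a minimal retry sketch with Polly (assuming the Polly NuGet package; the connection string, keys and expiry are just examples):
using System;
using System.Collections.Generic;
using Polly;
using Polly.Retry;
using StackExchange.Redis;

// retry up to 3 times on transient Redis failures, backing off a little each time
AsyncRetryPolicy retryPolicy = Policy
    .Handle<RedisConnectionException>()
    .Or<RedisTimeoutException>()
    .WaitAndRetryAsync(3, attempt => TimeSpan.FromMilliseconds(200 * attempt));

var cacheManager = new RedisCacheManager("connection string", TimeSpan.FromHours(6));
var keys = new List<string> { "key1", "key2" }; // example keys
RedisValue[] values = await retryPolicy.ExecuteAsync(() => cacheManager.GetFromCacheAsync(keys));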
Hello, I am building a TCP server where I will have multiple clients connected that will send and receive data to/from the server.
I want to know: if the framework does not create a 1:1 thread-to-client ratio but uses the thread pool, how do the following work?
1. If the method that gets executed after accepting the socket contains a loop inside it, won't the allocated thread (from the thread pool) get blocked on a client context?
2. Where is the context for each client stored?
P.S. In my picture I do not understand how the blue thread (given by the thread pool to service the two clients) gets reused.
The code below contains the Handler (which holds all connections) and the Client (a socket wrapper with basic read/write functionality).
Sockets Handler
class Handler
{
private Dictionary<string, Client> clients = new Dictionary<string, Client>();
private object Lock = new object();
public Handler()
{
}
public async Task LoopAsync(WebSocketManager manager)
{
WebSocket clientSocket = await manager.AcceptWebSocketAsync();
string clientID = Ext.MakeId();
using(Client newClient = Client.Create(clientSocket, clientID))
{
while (newClient.KeepAlive)
{
await newClient.ReceiveFromSocketAsync();
}
}
}
public bool RemoveClient(string ID)
{
bool removed = false;
lock (Lock)
{
if (this.clients.TryGetValue(ID, out Client client))
{
removed= this.clients.Remove(ID);
}
}
return removed;
}
}
SocketWrapper
class Client:IDisposable
{
public static Client Create(WebSocket socket,string id)
{
return new Client(socket, id);
}
private readonly string ID;
private const int BUFFER_SIZE = 100;
private readonly byte[] Buffer;
public bool KeepAlive { get; private set; }
private readonly WebSocket socket;
private Client(WebSocket socket,string ID)
{
this.socket = socket;
this.ID = ID;
this.Buffer = new byte[BUFFER_SIZE];
}
public async Task<ReadOnlyMemory<byte>> ReceiveFromSocketAsync()
{
WebSocketReceiveResult result = await this.socket.ReceiveAsync(this.Buffer, CancellationToken.None);
this.KeepAlive = result.MessageType==WebSocketMessageType.Close?false:true;
return this.Buffer.AsMemory();
}
public async Task SendToSocketAsync(string message)
{
ReadOnlyMemory<byte> memory = Encoding.UTF8.GetBytes(message);
await this.socket.SendAsync(memory, WebSocketMessageType.Binary,true,CancellationToken.None);
}
public void Dispose()
{
this.socket.Dispose();
}
}
The middleware/service that will get plugged into the application pipeline:
class SocketService
{
    private readonly Handler handler;
    private readonly RequestDelegate next;

    public SocketService(RequestDelegate next, Handler handler)
    {
        this.next = next;
        this.handler = handler;
    }

    public async Task Invoke(HttpContext context)
    {
        if (!context.WebSockets.IsWebSocketRequest)
        {
            await this.next(context);
            return;
        }
        await this.handler.LoopAsync(context.WebSockets);
    }
}
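For reference, this is roughly how I plan to wire it up (a sketch using the standard UseWebSockets/UseMiddleware calls and the classes above):
// in Startup.ConfigureServices:
services.AddSingleton<Handler>();

// in Startup.Configure:
app.UseWebSockets();                 // enables the WebSocket feature
app.UseMiddleware<SocketService>();  // SocketService.Invoke handles the upgrade requests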
I have an MVC4 .NET web application with a 3-tier architecture and Unity dependency injection, and I want to schedule a daily verification that sends some mails where needed. For this I want to use the Quartz Scheduler in Application_Start; because of the dependency injection, a Windows service is not a good option.
Here is my code in Application_Start:
// construct a scheduler factory
ISchedulerFactory schedFact = new StdSchedulerFactory();
IScheduler sched = schedFact.GetScheduler();
sched.Start();
IJobDetail dailyUserMailJob = new JobDetailImpl("DailyUserMailJob", null, typeof(SchedulerJob));
// fire every time I open App/EveryDay
ITrigger dailyUserMailTrigger = new SimpleTriggerImpl("DailyUserMailTrigger", 1,
new TimeSpan(1, 0, 0, 0));
sched.ScheduleJob(dailyUserMailJob, dailyUserMailTrigger);
Here is my job code:
using System;
using System.Collections.Generic;
using System.Linq;
using System.Web;
using EvaluationMvc.Bll.Contracts;
using Quartz;
using Quartz.Impl;
namespace EvaluationMvc.Utils
{
public class SchedulerJob : IJob
{
private IEvaluationBus _iEvaluationBus;
public SchedulerJob(IEvaluationBus iEvaluationBus)
{
//Dependency injection
_iEvaluationBus = iEvaluationBus;
}
public void Execute(IJobExecutionContext context)
{
_iEvaluationBus.testingArchitecture();
// Sends a test mail.
}
}
}
However, my job is never executed. What could be the problem?
The Quartz.NET scheduler must be created as a singleton.
You can install Unity.MVC4 NuGet Package.
It will create a Bootstrapper class which should look something like this:
public static class Bootstrapper
{
public static IUnityContainer Initialise()
{
var container = BuildUnityContainer();
DependencyResolver.SetResolver(new UnityDependencyResolver(container));
return container;
}
private static IUnityContainer BuildUnityContainer()
{
var container = new UnityContainer();
// Register your interfaces here.
RegisterTypes(container);
return container;
}
public static void RegisterTypes(IUnityContainer container)
{
}
}
Then you have to create your own implementation of JobFactory. This article might help you and this one is worth reading:
public class UnityJobFactory: IJobFactory
{
private readonly IUnityContainer container;
static UnityJobFactory()
{
}
public UnityJobFactory(IUnityContainer container)
{
this.container = container;
}
public IJob NewJob(TriggerFiredBundle bundle, IScheduler scheduler)
{
var jobDetail = bundle.JobDetail;
var jobType = jobDetail.JobType;
try
{
return this.container.Resolve(jobType) as IJob;
}
catch (Exception ex)
{
throw new SchedulerException(string.Format(
CultureInfo.InvariantCulture,
"Cannot instantiate class '{0}'", new object[] { jobDetail.JobType.FullName }), ex);
}
}
public void ReturnJob(IJob job)
{
// Nothing here. Unity does not maintain a handle to container created instances.
}
}
and your own implementation of StdSchedulerFactory:
public class UnitySchedulerFactory : StdSchedulerFactory
{
private readonly UnityJobFactory unityJobFactory;
public UnitySchedulerFactory(UnityJobFactory unityJobFactory)
{
this.unityJobFactory = unityJobFactory;
}
protected override IScheduler Instantiate(QuartzSchedulerResources rsrcs, QuartzScheduler qs)
{
qs.JobFactory = this.unityJobFactory;
return base.Instantiate(rsrcs, qs);
}
}
Going back to your Unity Bootstrapper you have to register your interfaces:
private static IUnityContainer BuildUnityContainer()
{
var container = new UnityContainer();
container.RegisterType<ISchedulerFactory, UnitySchedulerFactory>(new ContainerControlledLifetimeManager());
container.RegisterType<IScheduler>(new InjectionFactory(c => c.Resolve<ISchedulerFactory>().GetScheduler()));
container.RegisterType<IQuartzScheduler, QuartzScheduler>(new ContainerControlledLifetimeManager());
container.RegisterType<IEvaluationBus, EvaluationBus>();
RegisterTypes(container);
return container;
}
I've wrapped up my service scheduler in a class so that I can create it singleton:
public interface IQuartzScheduler
{
void Run();
void Stop();
}
and:
public class QuartzScheduler : IQuartzScheduler
{
private readonly ISchedulerFactory SchedulerFactory;
private readonly IScheduler Scheduler;
public QuartzScheduler(ISchedulerFactory schedulerFactory, IScheduler scheduler)
{
this.SchedulerFactory = schedulerFactory;
this.Scheduler = scheduler;
}
public void Run()
{
IJobDetail dailyUserMailJob = new JobDetailImpl("DailyUserMailJob", null, typeof(Scheduler.SchedulerJob));
// fire every time I open App/EveryDay
ITrigger dailyUserMailTrigger = new SimpleTriggerImpl("DailyUserMailTrigger", 10,
new TimeSpan(0, 0, 0, 20));
this.Scheduler.ScheduleJob(dailyUserMailJob, dailyUserMailTrigger);
this.Scheduler.Start();
}
public void Stop()
{
this.Scheduler.Shutdown(false);
}
}
As you can see in this class I'll create my jobs/trigger and start the scheduler.
now in your Application_Start (global.asax) you can "bootstrap" your Unity Container, get the service scheduler and run it.
var unityContainer = Infrastructure.Bootstrapper.Initialise();
unityContainer.Resolve<IQuartzScheduler>().Run();
You can find a working sample following this link (QuartzWithUnity).
Very useful, thanks LeftyX. I think in Application_Start you have to create the service like this:
var unityContainer = Bootstrapper.Initialise();
QuartzScheduler jobService = (QuartzScheduler)unityContainer.Resolve(typeof(QuartzScheduler), "Jobs");
jobService.Run();
I'm profiling our unit & integration tests, and I find that a lot of the time is spent in the finalizer of NHibernate.Transaction.AdoTransaction - this means it is not getting disposed properly.
I am not using AdoTransaction directly in the code, so it's probably used by some other object inside NHibernate. Any idea what I'm forgetting to Dispose?
Here is my test fixture:
public abstract class AbstractInMemoryFixture
{
protected ISessionFactory sessionFactory;
protected ILogger Logger { get; private set; }
static readonly Configuration config;
private static readonly ISessionFactory internalSessionFactory;
static AbstractInMemoryFixture()
{
config = new NHibernateConfigurator().Configure(NHibernateConfigurators.SQLiteInMemory());
internalSessionFactory = config.BuildSessionFactory();
}
[SetUp]
public void SetUp()
{
const string sqliteInMemoryConnectionString = "Data Source=:memory:;Version=3;Pooling=False;Max Pool Size=1;";
var con = new SQLiteConnection(sqliteInMemoryConnectionString);
con.Open();
new SchemaExport(config).Execute(false, true, false, true, con, System.Console.Out);
var proxyGenerator = new ProxyGenerator();
sessionFactory = proxyGenerator.CreateInterfaceProxyWithTarget(internalSessionFactory, new UseExistingConnectionInterceptor(con));
Logger = new NullLogger();
ExtraSetup();
}
[TearDown]
public void TearDown()
{
var con = sessionFactory.OpenSession().Connection;
if (con != null)
{
if (con.State == ConnectionState.Open)
con.Close();
con.Dispose();
}
}
private class UseExistingConnectionInterceptor :IInterceptor
{
private readonly SQLiteConnection connection;
public UseExistingConnectionInterceptor(SQLiteConnection connection)
{
this.connection = connection;
}
public void Intercept(IInvocation invocation)
{
if (invocation.Method.Name != "OpenSession" || invocation.Method.GetParameters().Length > 0)
{
invocation.Proceed();
return;
}
var factory = (ISessionFactory) invocation.InvocationTarget;
invocation.ReturnValue = factory.OpenSession(connection);
}
}
protected virtual void ExtraSetup() { }
}
I have the same problem while accessing a Sybase database. I don't know why, nor whether it's really the reason for the issue, but it appears that some of the code in \NHibernate\Transaction\AdoTransaction.cs (lines 307 to 311) related to closing/disposing the object has been disabled for a while. Unfortunately, the blame feature of SVN does not give much info :(
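In the meantime, the usual way to avoid the finalizer cost is to make sure every transaction is disposed deterministically; a generic sketch (not specific to the fixture above):
// both the session and its transaction (the AdoTransaction) are disposed here,
// so nothing is left for the finalizer
using (ISession session = sessionFactory.OpenSession())
using (ITransaction tx = session.BeginTransaction())
{
    // ... queries / saves ...
    tx.Commit();
}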