Correct way to use Serilog with WebApi2 - asp.net-web-api2

I am in search of the correct way to use Serilog with ASP.NET Web API 2.
For now I initialize the global Log.Logger property like this:
public static void Register(HttpConfiguration config)
{
Log.Logger = new LoggerConfiguration()
.WriteTo.Elasticsearch(new ElasticsearchSinkOptions(new Uri("http://localhost:9200"))
{
IndexFormat = IndexFormat,
BufferBaseFilename = outputLogPath,
AutoRegisterTemplate = true,
AutoRegisterTemplateVersion = AutoRegisterTemplateVersion.ESv6,
CustomFormatter = new ElasticsearchJsonFormatter(renderMessageTemplate: false),
BufferFileCountLimit = NbDaysRetention
})
.MinimumLevel.ControlledBy(new LoggingLevelSwitch() { MinimumLevel = LogEventLevel.Information})
.Enrich.FromLogContext()
.Enrich.WithWebApiRouteTemplate()
.Enrich.WithWebApiActionName()
.CreateLogger();
//Trace all requests
SerilogWebClassic.Configure(cfg => cfg.LogAtLevel(LogEventLevel.Information));
config.MapHttpAttributeRoutes();
config.Routes.MapHttpRoute(
name: "DefaultApi",
routeTemplate: "api/{controller}/{id}",
defaults: new { id = RouteParameter.Optional }
);
}
Is there a cleaner way to do it? I am also wondering whether this might be a problem when I have to put some tests in place for my controllers.

I have used the following code organization quite a lot for apps with Web API (and/or MVC). (Note that it may be a bit outdated, as it is based on old versions of some packages, but you should be able to adapt it without too much work ...)
You will need quite a few packages, which you should be able to guess from the namespaces, but most importantly, install the WebActivatorEx package, which provides a way to have code running at different moments of the app lifecycle.
Our Global.asax.cs ended up looking like this:
public class WebApiApplication : System.Web.HttpApplication
{
// rely on the fact that AppPreStart is called before Application_Start
private static readonly ILogger Logger = Log.ForContext<WebApiApplication>();
public override void Init()
{
base.Init();
}
protected void Application_Start()
{
// WARNING : Some code runs even before this method ... see AppPreStart
Logger.Debug("In Application_Start");
// Mvc (must be before)
AreaRegistration.RegisterAllAreas();
// Web API
// ... snip ...
// some DependencyInjection config ...
// ... snip ...
// MVC
FilterConfig.RegisterGlobalFilters(GlobalFilters.Filters);
RouteConfig.RegisterRoutes(RouteTable.Routes);
BundleConfig.RegisterBundles(BundleTable.Bundles);
Logger.Information("App started !");
}
protected void Application_End(object sender, EventArgs e)
{
Logger.Debug("In Application_End");
ApplicationShutdownReason shutdownReason = System.Web.Hosting.HostingEnvironment.ShutdownReason;
Logger.Information("App is shutting down (reason = {#shutdownReason})", shutdownReason);
// WARNING : Some code runs AFTER Application_End ... see AppPostShutDown
}
}
and then several classes under the App_Start folder (some of which you can just ignore :P ) :
AppPreStart.cs
using XXX;
using Serilog;
[assembly: WebActivatorEx.PreApplicationStartMethod(typeof(AppPreStart), nameof(AppPreStart.PreApplicationStart))]
namespace XXX
{
/// <summary>
/// This runs even before global.asax Application_Start (see WebActivatorConfig)
/// </summary>
public class AppPreStart
{
public static void PreApplicationStart()
{
LogConfig.Configure();
var logger = Log.ForContext<AppPreStart>();
logger.Information("App is starting ...");
// ... snip ...
// very early things like IoC config, AutoMapper config ...
// ... snip ...
logger.Debug("Done with AppPreStart");
}
}
}
AppPostShutDown.cs
using XXX;
using Serilog;
[assembly: WebActivatorEx.ApplicationShutdownMethod(typeof(AppPostShutDown), nameof(AppPostShutDown.PostApplicationShutDown))]
namespace XXX
{
/// <summary>
/// This runs after the application has shut down (see WebActivatorConfig)
/// </summary>
public class AppPostShutDown
{
private static ILogger Logger = Log.ForContext<AppPostShutDown>();
public static void PostApplicationShutDown()
{
Logger.Debug("PostApplicationShutDown");
// ... snip ...
// very late things like IoC dispose ....
// ... snip ...
// force flushing the last "not logged" events
Logger.Debug("Closing the logger! ");
Log.CloseAndFlush();
}
}
}
LogConfig.cs
using Serilog;
using Serilog.Events;
using SerilogWeb.Classic;
using SerilogWeb.Classic.Enrichers;
using SerilogWeb.Classic.WebApi.Enrichers;
namespace XXX
{
public class LogConfig
{
static public void Configure()
{
ApplicationLifecycleModule.LogPostedFormData = LogPostedFormDataOption.OnlyOnError;
ApplicationLifecycleModule.FormDataLoggingLevel = LogEventLevel.Debug;
ApplicationLifecycleModule.RequestLoggingLevel = LogEventLevel.Debug;
var loggerConfiguration = new LoggerConfiguration().ReadFrom.AppSettings()
.Enrich.FromLogContext()
.Enrich.With<HttpRequestIdEnricher>()
.Enrich.With<UserNameEnricher>()
.Enrich.With<HttpRequestUrlEnricher>()
.Enrich.With<WebApiRouteTemplateEnricher>()
.Enrich.With<WebApiControllerNameEnricher>()
.Enrich.With<WebApiRouteDataEnricher>()
.Enrich.With<WebApiActionNameEnricher>()
;
Log.Logger = loggerConfiguration.CreateLogger();
}
}
}
and then we read the variable parts of the log configuration from Web.config, which has the following keys in appSettings (ReadFrom.AppSettings() comes from the Serilog.Settings.AppSettings package):
<!-- SeriLog-->
<add key="serilog:level-switch:$controlSwitch" value="Information" />
<add key="serilog:minimum-level:controlled-by" value="$controlSwitch" />
<add key="serilog:enrich:with-property:AppName" value="XXXApp" />
<add key="serilog:enrich:with-property:AppComponent" value="XXXComponent" />
<add key="serilog:enrich:with-property:Environment" value="Dev" />
<add key="serilog:enrich:with-property:MachineName" value="%COMPUTERNAME%" />
<add key="serilog:using:Seq" value="Serilog.Sinks.Seq" />
<add key="serilog:write-to:Seq.serverUrl" value="http://localhost:5341" />
<add key="serilog:write-to:Seq.apiKey" value="xxxxxxxxxxx" />
<add key="serilog:write-to:Seq.controlLevelSwitch" value="$controlSwitch" />
(and then we had Web.config transforms to turn it into a "tokenized" file for production):
Web.Release.config
<!-- SeriLog-->
<add key="serilog:enrich:with-property:Environment" value="__Release_EnvironmentName__"
xdt:Transform="Replace" xdt:Locator="Match(key)"/>
<add key="serilog:write-to:Seq.serverUrl" value="__app_serilogSeqUrl__"
xdt:Transform="Replace" xdt:Locator="Match(key)"/>
<add key="serilog:write-to:Seq.apiKey" value="__app_serilogApiKey__"
xdt:Transform="Replace" xdt:Locator="Match(key)"/>
Some of the most important parts of this configuration are:
configure your logger as soon as possible
call Log.CloseAndFlush(); at the very end to be sure all your log events are stored/pushed
add Enrich.FromLogContext() from Serilog, and some enrichers from SerilogWeb.Classic and SerilogWeb.WebApi; they can turn out to be super useful (see the small sketch after this list).
logging to a log server that properly supports structured logging (writing to files just has too many drawbacks) ... we used Seq and were very very happy about it (installed locally on every dev machine, and then a centralized instance in production). It supports searching/querying and dashboards and also dynamic log level control.
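To make the Enrich.FromLogContext() point above concrete, here is a minimal sketch (the property name and the orderId variable are just examples): any property pushed onto the LogContext is attached to every event written inside the scope.
// requires: using Serilog; using Serilog.Context;
using (LogContext.PushProperty("OrderId", orderId)) // orderId is a hypothetical local variable
{
Log.Information("Processing order"); // this event carries OrderId
// ... do the work ...
Log.Information("Finished processing order"); // and so does this one
}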

Related

.Net Core Logging Dependency Injection - resolve ILogger / ILogger`1 - what does the tick mean? Resolve type possible?

I'm working with a .NET Core 3.1 xUnit project.
I create a ServiceCollection and call the AddLogging extension method. Then I can create an ILogger instance with the LoggerFactory / ILoggerFactory, and when I debug, I can see my ServiceCollection has a ServiceDescriptor for this type:
Lifetime = Singleton, ServiceType = {Name = "ILogger`1" FullName = "Microsoft.Extensions.Logging.ILogger`1"}, ImplementationType = {Name = "Logger`1" FullName = "Microsoft.Extensions.Logging.Logger`1"}
I'm curious what that tick mark means in the type name and if it's possible to resolve an ILogger instance without using the LoggerFactory.
Here are a couple of failed attempts at resolving ILogger`1. The last call to CreateLogger works.
[Fact]
public void AddLogging_RegistersILogger()
{
var services = new ServiceCollection().AddLogging();
var serviceProvider = services.BuildServiceProvider();
var logger1 = serviceProvider.GetService<ILogger>(); // logger1 == null
try
{
var logger2 = serviceProvider.GetService(typeof(ILogger<>));
}
catch (Exception e)
{
// Implementation type 'Microsoft.Extensions.Logging.Logger`1[T]' can't be converted to service type 'Microsoft.Extensions.Logging.ILogger`1[TCategoryName]'
}
try
{
var loggerTypes = Assembly.GetAssembly(typeof(ILogger)).GetTypes().Where(t => t.Name == "ILogger`1");
var loggerType = loggerTypes.First();
var logger3 = serviceProvider.GetService(loggerType);
}
catch(Exception e)
{
// Implementation type 'Microsoft.Extensions.Logging.Logger`1[T]' can't be converted to service type 'Microsoft.Extensions.Logging.ILogger`1[TCategoryName]'
}
var logger4 = serviceProvider.GetService<ILoggerFactory>().CreateLogger<DependencyInjectionTests>();
Assert.NotNull(logger4);
}
The tick on "ILogger`1" means its a generic type, i.e. ILogger<CategoryName>
You can inject ILogger<T> for any generic type. The generic type is used to set the category name in a convenient manner, e.g. ILogger<Controller> will have "Microsoft.AspNetCore.Mvc.Controller" category name
The common practice is for each class to have a logger with the category name of the class, e.g. a class namedMyService will have a logger ILogger<MyService>
You can either inject ILogger<T> or use ILoggerFactory:
Injecting ILogger<MyService> is equivalent to calling loggerFactory.CreateLogger<MyService>() or loggerFactory.CreateLogger("<namespace_of_my_service>.MyService")
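To directly answer the "resolve without the LoggerFactory" part: resolving the closed generic ILogger<T> from the provider works, because AddLogging registers the open generic ILogger<> against Logger<>. A minimal sketch, reusing the test class name from the question:
[Fact]
public void AddLogging_ResolvesGenericILogger()
{
var serviceProvider = new ServiceCollection().AddLogging().BuildServiceProvider();
// The closed generic resolves; only the open generic ILogger<> and the
// non-generic ILogger cannot be resolved directly.
var logger = serviceProvider.GetService<ILogger<DependencyInjectionTests>>();
Assert.NotNull(logger);
}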
I resolved the problem by implementing NLog using the following steps in ASP.NET Core:
https://code-maze.com/net-core-web-development-part3/
Nuget:
1. NLog.Config
2. NLog.Extensions.Logging
Logging: functionality added to software so someone can get insight into the software.
NLog organizes logging with targets and rules.
nlog.config (change the file's property to Copy always)
1. the logfile target creates a file in the logs directory
2. Debug and above is written to the logfile target
<?xml version="1.0" encoding="utf-8" ?>
<nlog xmlns="http://www.nlog-project.org/schemas/NLog.xsd"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
autoReload="true"
internalLogLevel="Trace"
internalLogFile="internallog.txt">
<targets>
<target name="logfile" xsi:type="File"
fileName="${startupdir}\logs\${shortdate}_logfile.txt"
layout="${longdate} ${level:uppercase=true} ${message}"/>
</targets>
<rules>
<logger name="*" minlevel="Debug" writeTo="logfile" />
</rules>
</nlog>
startup.cs
1. load the nlog.config
2. ConfigureLoggerService is part of the static class called ServiceExtensions. Its job is to register the LoggerManager as a singleton.
public Startup(IConfiguration configuration)
{
LogManager.LoadConfiguration(@"nlog.config");
Configuration = configuration;
//_env = env;
}
public void ConfigureServices(IServiceCollection services)
{
services.ConfigureLoggerService();
}
serviceExtensions.cs
public static class ServiceExtensions
{
public static void ConfigureLoggerService(this IServiceCollection services)
{
services.AddSingleton<ILoggerManager, LoggerManager>();
}
}
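The ILoggerManager interface itself is not shown in these steps; presumably it just mirrors the four methods of the LoggerManager below, something like:
public interface ILoggerManager
{
void LogDebug(string message);
void LogError(string message);
void LogInfo(string message);
void LogWarn(string message);
}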
LoggerManager.cs
1. the LoggerManager wraps an NLog logger so it can be dependency injected and used to log at the appropriate level.
public class LoggerManager : ILoggerManager
{
private static ILogger logger = LogManager.GetCurrentClassLogger();
public void LogDebug(string message)
{
logger.Debug(message);
}
public void LogError(string message)
{
logger.Error(message);
}
public void LogInfo(string message)
{
logger.Info(message);
}
public void LogWarn(string message)
{
logger.Warn(message);
}
}
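Once registered, the logger can be constructor-injected wherever it is needed; a hypothetical controller (all names invented) would use it like this:
// requires: using Microsoft.AspNetCore.Mvc;
public class WeatherController : ControllerBase
{
private readonly ILoggerManager _logger;
public WeatherController(ILoggerManager logger)
{
_logger = logger;
}
[HttpGet]
public IActionResult Get()
{
_logger.LogInfo("Fetching weather data");
return Ok();
}
}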

How to start Quartz in ASP.NET Core?

I have the following class
public class MyEmailService
{
public async Task<bool> SendAdminEmails()
{
...
}
public async Task<bool> SendUserEmails()
{
...
}
}
public interface IMyEmailService
{
Task<bool> SendAdminEmails();
Task<bool> SendUserEmails();
}
I have installed the latest Quartz 2.4.1 Nuget package as I wanted a lightweight scheduler in my web app without a separate SQL Server database.
I need to schedule the methods
SendUserEmails to run every week on Mondays 17:00,Tuesdays 17:00 & Wednesdays 17:00
SendAdminEmails to run every week on Thursdays 09:00, Fridays 9:00
What code do I need to schedule these methods using Quartz in ASP.NET Core? I also need to know how to start Quartz in ASP.NET Core as all code samples on the internet still refer to previous versions of ASP.NET.
I can find a code sample for the previous version of ASP.NET but I don't know how to start Quartz in ASP.NET Core to start testing.
Where do I put the JobScheduler.Start(); in ASP.NET Core?
TL;DR (full answer can be found below)
Assumed tooling: Visual Studio 2017 RTM, .NET Core 1.1, .NET Core SDK 1.0, SQL Server Express 2016 LocalDB.
In web application .csproj:
<Project Sdk="Microsoft.NET.Sdk.Web">
<!-- .... existing contents .... -->
<!-- add the following ItemGroup element, it adds required packages -->
<ItemGroup>
<PackageReference Include="Quartz" Version="3.0.0-alpha2" />
<PackageReference Include="Quartz.Serialization.Json" Version="3.0.0-alpha2" />
</ItemGroup>
</Project>
In the Program class (as scaffolded by Visual Studio by default):
public class Program
{
private static IScheduler _scheduler; // add this field
public static void Main(string[] args)
{
var host = new WebHostBuilder()
.UseKestrel()
.UseContentRoot(Directory.GetCurrentDirectory())
.UseIISIntegration()
.UseStartup<Startup>()
.UseApplicationInsights()
.Build();
StartScheduler(); // add this line
host.Run();
}
// add this method
private static void StartScheduler()
{
var properties = new NameValueCollection {
// json serialization is the one supported under .NET Core (binary isn't)
["quartz.serializer.type"] = "json",
// the following setup of job store is just for example and it didn't change from v2
// according to your usage scenario though, you definitely need
// the ADO.NET job store and not the RAMJobStore.
["quartz.jobStore.type"] = "Quartz.Impl.AdoJobStore.JobStoreTX, Quartz",
["quartz.jobStore.useProperties"] = "false",
["quartz.jobStore.dataSource"] = "default",
["quartz.jobStore.tablePrefix"] = "QRTZ_",
["quartz.jobStore.driverDelegateType"] = "Quartz.Impl.AdoJobStore.SqlServerDelegate, Quartz",
["quartz.dataSource.default.provider"] = "SqlServer-41", // SqlServer-41 is the new provider for .NET Core
["quartz.dataSource.default.connectionString"] = #"Server=(localdb)\MSSQLLocalDB;Database=Quartz;Integrated Security=true"
};
var schedulerFactory = new StdSchedulerFactory(properties);
_scheduler = schedulerFactory.GetScheduler().Result;
_scheduler.Start().Wait();
var userEmailsJob = JobBuilder.Create<SendUserEmailsJob>()
.WithIdentity("SendUserEmails")
.Build();
var userEmailsTrigger = TriggerBuilder.Create()
.WithIdentity("UserEmailsCron")
.StartNow()
.WithCronSchedule("0 0 17 ? * MON,TUE,WED")
.Build();
_scheduler.ScheduleJob(userEmailsJob, userEmailsTrigger).Wait();
var adminEmailsJob = JobBuilder.Create<SendAdminEmailsJob>()
.WithIdentity("SendAdminEmails")
.Build();
var adminEmailsTrigger = TriggerBuilder.Create()
.WithIdentity("AdminEmailsCron")
.StartNow()
.WithCronSchedule("0 0 9 ? * THU,FRI")
.Build();
_scheduler.ScheduleJob(adminEmailsJob, adminEmailsTrigger).Wait();
}
}
An example of a job class:
public class SendUserEmailsJob : IJob
{
public Task Execute(IJobExecutionContext context)
{
// an instance of email service can be obtained in different ways,
// e.g. service locator, constructor injection (requires custom job factory)
IMyEmailService emailService = new MyEmailService();
// delegate the actual work to email service
return emailService.SendUserEmails();
}
}
Full answer
Quartz for .NET Core
First, you have to use v3 of Quartz, as it targets .NET Core, according to this announcement.
Currently, only alpha versions of the v3 packages are available on NuGet. It looks like the team put a lot of effort into releasing 2.5.0, which does not target .NET Core. Nevertheless, in their GitHub repo, the master branch is already dedicated to v3, and basically, the open issues for the v3 release don't seem to be critical, mostly old wishlist items, IMHO. Since recent commit activity is quite low, I would expect the v3 release in a few months, or maybe half a year - but no one knows.
Jobs and IIS recycling
If the web application is going to be hosted under IIS, you have to take into consideration recycling/unloading behavior of worker processes. The ASP.NET Core web app runs as a regular .NET Core process, separate from w3wp.exe - IIS only serves as a reverse proxy. Nevertheless, when an instance of w3wp.exe is recycled or unloaded, the related .NET Core app process is also signaled to exit (according to this).
Web application can also be self-hosted behind a non-IIS reverse proxy (e.g. NGINX), but I will assume that you do use IIS, and narrow my answer accordingly.
The problems that recycling/unloading introduces are explained well in the post referenced by @darin-dimitrov:
If for example, on Friday 9:00 the process is down, because several hours earlier it was unloaded by IIS due to inactivity - no admin emails will be sent until the process is up again. To avoid that, configure IIS to minimize unloads/recyclings (see this answer).
From my experience, the above configuration still doesn't give a 100% guarantee that IIS will never unload the application. For a 100% guarantee that your process is up, you can set up a command that periodically sends requests to your application, and thus keeps it alive.
When the host process is recycled/unloaded, the jobs must be gracefully stopped, to avoid data corruption.
Why would you host scheduled jobs in a web app
I can think of one justification for having those email jobs hosted in a web app, despite the problems listed above: the decision to have only one kind of application model (ASP.NET). Such an approach simplifies the learning curve, deployment procedure, production monitoring, etc.
If you don't want to introduce backend microservices (which would be a good place to move the email jobs to), then it makes sense to overcome IIS recycling/unloading behaviors, and run Quartz inside a web app.
Or maybe you have other reasons.
Persistent job store
In your scenario, status of job execution must be persisted out of process. Therefore, default RAMJobStore doesn't fit, and you have to use the ADO.NET Job Store.
Since you mentioned SQL Server in the question, I will provide example setup for SQL Server database.
How to start (and gracefully stop) the scheduler
I assume you use Visual Studio 2017 and latest/recent version of .NET Core tooling. Mine is .NET Core Runtime 1.1 and .NET Core SDK 1.0.
For DB setup example, I will use a database named Quartz in SQL Server 2016 Express LocalDB. DB setup scripts can be found here.
First, add required package references to web application .csproj (or do it with NuGet package manager GUI in Visual Studio):
<Project Sdk="Microsoft.NET.Sdk.Web">
<!-- .... existing contents .... -->
<!-- the following ItemGroup adds required packages -->
<ItemGroup>
<PackageReference Include="Quartz" Version="3.0.0-alpha2" />
<PackageReference Include="Quartz.Serialization.Json" Version="3.0.0-alpha2" />
</ItemGroup>
</Project>
With the help of Migration Guide and the V3 Tutorial, we can figure out how to start and stop the scheduler. I prefer to encapsulate this in a separate class, let's name it QuartzStartup.
using System;
using System.Collections.Specialized;
using System.Threading.Tasks;
using Quartz;
using Quartz.Impl;
namespace WebApplication1
{
// Responsible for starting and gracefully stopping the scheduler.
public class QuartzStartup
{
private IScheduler _scheduler; // after Start, and until shutdown completes, references the scheduler object
// starts the scheduler, defines the jobs and the triggers
public void Start()
{
if (_scheduler != null)
{
throw new InvalidOperationException("Already started.");
}
var properties = new NameValueCollection {
// json serialization is the one supported under .NET Core (binary isn't)
["quartz.serializer.type"] = "json",
// the following setup of job store is just for example and it didn't change from v2
["quartz.jobStore.type"] = "Quartz.Impl.AdoJobStore.JobStoreTX, Quartz",
["quartz.jobStore.useProperties"] = "false",
["quartz.jobStore.dataSource"] = "default",
["quartz.jobStore.tablePrefix"] = "QRTZ_",
["quartz.jobStore.driverDelegateType"] = "Quartz.Impl.AdoJobStore.SqlServerDelegate, Quartz",
["quartz.dataSource.default.provider"] = "SqlServer-41", // SqlServer-41 is the new provider for .NET Core
["quartz.dataSource.default.connectionString"] = #"Server=(localdb)\MSSQLLocalDB;Database=Quartz;Integrated Security=true"
};
var schedulerFactory = new StdSchedulerFactory(properties);
_scheduler = schedulerFactory.GetScheduler().Result;
_scheduler.Start().Wait();
var userEmailsJob = JobBuilder.Create<SendUserEmailsJob>()
.WithIdentity("SendUserEmails")
.Build();
var userEmailsTrigger = TriggerBuilder.Create()
.WithIdentity("UserEmailsCron")
.StartNow()
.WithCronSchedule("0 0 17 ? * MON,TUE,WED")
.Build();
_scheduler.ScheduleJob(userEmailsJob, userEmailsTrigger).Wait();
var adminEmailsJob = JobBuilder.Create<SendAdminEmailsJob>()
.WithIdentity("SendAdminEmails")
.Build();
var adminEmailsTrigger = TriggerBuilder.Create()
.WithIdentity("AdminEmailsCron")
.StartNow()
.WithCronSchedule("0 0 9 ? * THU,FRI")
.Build();
_scheduler.ScheduleJob(adminEmailsJob, adminEmailsTrigger).Wait();
}
// initiates shutdown of the scheduler, and waits until jobs exit gracefully (within allotted timeout)
public void Stop()
{
if (_scheduler == null)
{
return;
}
// give running jobs 30 sec (for example) to stop gracefully
if (_scheduler.Shutdown(waitForJobsToComplete: true).Wait(30000))
{
_scheduler = null;
}
else
{
// jobs didn't exit in timely fashion - log a warning...
}
}
}
}
Note 1. In the above example, SendUserEmailsJob and SendAdminEmailsJob are classes that implement IJob. The IJob interface is slightly different from IMyEmailService, because its Execute method returns Task and not Task<bool>. Both job classes should get IMyEmailService as a dependency (probably via constructor injection).
Note 2. For a long-running job to be able to exit in a timely fashion, the IJob.Execute method should observe the status of IJobExecutionContext.CancellationToken. This may require a change in the IMyEmailService interface, to make its methods receive a CancellationToken parameter:
public interface IMyEmailService
{
Task<bool> SendAdminEmails(CancellationToken cancellation);
Task<bool> SendUserEmails(CancellationToken cancellation);
}
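Putting both notes together, a job that takes the email service as a constructor dependency (which, as mentioned, requires a custom job factory) and forwards the cancellation token might look roughly like this:
public class SendAdminEmailsJob : IJob
{
private readonly IMyEmailService _emailService;
public SendAdminEmailsJob(IMyEmailService emailService)
{
_emailService = emailService;
}
public Task Execute(IJobExecutionContext context)
{
// forward the scheduler's cancellation token, so that
// Shutdown(waitForJobsToComplete: true) can interrupt a long-running send
return _emailService.SendAdminEmails(context.CancellationToken);
}
}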
When and where to start and stop the scheduler
In ASP.NET Core, application bootstrap code resides in the Program class, much like in a console app. The Main method is called to create the web host, run it, and wait until it exits:
public class Program
{
public static void Main(string[] args)
{
var host = new WebHostBuilder()
.UseKestrel()
.UseContentRoot(Directory.GetCurrentDirectory())
.UseIISIntegration()
.UseStartup<Startup>()
.UseApplicationInsights()
.Build();
host.Run();
}
}
The simplest thing to do is to just put a call to QuartzStartup.Start right in the Main method, much like I did in the TL;DR. But since we have to properly handle process shutdown as well, I prefer to hook both startup and shutdown code in a more consistent manner.
This line:
.UseStartup<Startup>()
refers to a class named Startup, which is scaffolded when creating new ASP.NET Core Web Application project in Visual Studio. The Startup class looks like this:
public class Startup
{
public Startup(IHostingEnvironment env)
{
// scaffolded code...
}
public IConfigurationRoot Configuration { get; }
// This method gets called by the runtime. Use this method to add services to the container.
public void ConfigureServices(IServiceCollection services)
{
// scaffolded code...
}
// This method gets called by the runtime. Use this method to configure the HTTP request pipeline.
public void Configure(IApplicationBuilder app, IHostingEnvironment env, ILoggerFactory loggerFactory)
{
// scaffolded code...
}
}
It is clear that a call to QuartzStartup.Start should be inserted in one of the methods of the Startup class. The question is where QuartzStartup.Stop should be hooked.
In the legacy .NET Framework, ASP.NET provided the IRegisteredObject interface. According to this post, and the documentation, in ASP.NET Core it was replaced with IApplicationLifetime. Bingo. An instance of IApplicationLifetime can be injected into the Startup.Configure method through a parameter.
For consistency, I will hook both QuartzStartup.Start and QuartzStartup.Stop to IApplicationLifetime:
public class Startup
{
// This method gets called by the runtime. Use this method to configure the HTTP request pipeline.
public void Configure(
IApplicationBuilder app,
IHostingEnvironment env,
ILoggerFactory loggerFactory,
IApplicationLifetime lifetime) // added this parameter
{
// the following 3 lines hook QuartzStartup into web host lifecycle
var quartz = new QuartzStartup();
lifetime.ApplicationStarted.Register(quartz.Start);
lifetime.ApplicationStopping.Register(quartz.Stop);
// .... original scaffolded code here ....
}
// ....the rest of the scaffolded members ....
}
Note that I have extended the signature of the Configure method with an additional IApplicationLifetime parameter. According to documentation, ApplicationStopping will block until registered callbacks are completed.
Graceful shutdown on IIS Express, and ASP.NET Core module
I was able to observe the expected behavior of the IApplicationLifetime.ApplicationStopping hook only on IIS with the latest ASP.NET Core module installed. Both IIS Express (installed with Visual Studio 2017 Community RTM) and IIS with an outdated version of the ASP.NET Core module didn't consistently invoke IApplicationLifetime.ApplicationStopping. I believe it is because of this bug, which has since been fixed.
You can install the latest version of the ASP.NET Core module from here. Follow the instructions in the "Installing the latest ASP.NET Core Module" section.
Quartz vs. FluentScheduler
I also took a look at FluentScheduler, as it was proposed as an alternative library by @Brice Molesti. My first impression is that FluentScheduler is quite a simplistic and immature solution compared to Quartz. For example, FluentScheduler doesn't provide such fundamental features as job status persistence and clustered execution.
In addition to @felix-b's answer: adding DI to jobs. Also, QuartzStartup.Start can be made async.
Based on this answer: https://stackoverflow.com/a/42158004/1235390
public class QuartzStartup
{
public QuartzStartup(IServiceProvider serviceProvider)
{
_serviceProvider = serviceProvider;
}
public async Task Start()
{
// other code is same
_scheduler = await schedulerFactory.GetScheduler();
_scheduler.JobFactory = new JobFactory(_serviceProvider);
await _scheduler.Start();
var sampleJob = JobBuilder.Create<SampleJob>().Build();
var sampleTrigger = TriggerBuilder.Create().StartNow().WithCronSchedule("0 0/1 * * * ?").Build();
await _scheduler.ScheduleJob(sampleJob, sampleTrigger);
}
}
JobFactory class
public class JobFactory : IJobFactory
{
private IServiceProvider _serviceProvider;
public JobFactory(IServiceProvider serviceProvider)
{
_serviceProvider = serviceProvider;
}
public IJob NewJob(TriggerFiredBundle bundle, IScheduler scheduler)
{
return _serviceProvider.GetService(bundle.JobDetail.JobType) as IJob;
}
public void ReturnJob(IJob job)
{
(job as IDisposable)?.Dispose();
}
}
Startup class:
public void ConfigureServices(IServiceCollection services)
{
// other code is removed for brevity
// need to register all JOBS by their class name
services.AddTransient<SampleJob>();
}
public void Configure(IApplicationBuilder app, IHostingEnvironment env, IApplicationLifetime applicationLifetime)
{
var quartz = new QuartzStartup(_services.BuildServiceProvider());
applicationLifetime.ApplicationStarted.Register(() => quartz.Start());
applicationLifetime.ApplicationStopping.Register(quartz.Stop);
// other code removed for brevity
}
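Note that _services in Configure above is not defined in the snippet; presumably it is the IServiceCollection captured in ConfigureServices, along these lines:
private IServiceCollection _services;
public void ConfigureServices(IServiceCollection services)
{
services.AddTransient<SampleJob>();
_services = services; // kept so Configure can build a provider for the job factory
}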
SampleJob class with constructor dependency injection:
public class SampleJob : IJob
{
private readonly ILogger<SampleJob> _logger;
public SampleJob(ILogger<SampleJob> logger)
{
_logger = logger;
}
public async Task Execute(IJobExecutionContext context)
{
_logger.LogDebug("Execute called");
}
}
I don't know how to do it with Quartz, but I experimented with the same scenario using another library which works very well. Here is how I did it:
Install FluentScheduler
Install-Package FluentScheduler
Use it like this
var registry = new Registry();
JobManager.Initialize(registry);
JobManager.AddJob(() => MyEmailService.SendUserEmails(), s => s
.ToRunEvery(1)
.Weeks()
.On(DayOfWeek.Monday)
.At(17, 00));
JobManager.AddJob(() => MyEmailService.SendUserEmails(), s => s
.ToRunEvery(1)
.Weeks()
.On(DayOfWeek.Tuesday)
.At(17, 00));
JobManager.AddJob(() => MyEmailService.SendUserEmails(), s => s
.ToRunEvery(1)
.Weeks()
.On(DayOfWeek.Wednesday)
.At(17, 00));
JobManager.AddJob(() => MyEmailService.SendAdminEmails(), s => s
.ToRunEvery(1)
.Weeks()
.On(DayOfWeek.Thursday)
.At(09, 00));
JobManager.AddJob(() => MyEmailService.SendAdminEmails(), s => s
.ToRunEvery(1)
.Weeks()
.On(DayOfWeek.Friday)
.At(09, 00));
Documentation can be found here FluentScheduler on GitHub
What code do I need to schedule these methods using Quartz in ASP.NET Core? I also need to know how to start Quartz in ASP.NET Core as all code samples on the internet still refer to previous versions of ASP.NET.
Hi, there is now good Quartz DI support to initialize and use it:
[DisallowConcurrentExecution]
public class Job1 : IJob
{
private readonly ILogger<Job1> _logger;
public Job1(ILogger<Job1> logger)
{
_logger = logger;
}
public async Task Execute(IJobExecutionContext context)
{
_logger.LogInformation("Start job1");
await Task.Delay(2, context.CancellationToken);
_logger?.LogInformation("End job1");
}
}
public class Startup
{
public Startup(IConfiguration configuration)
{
Configuration = configuration;
}
public IConfiguration Configuration { get; }
// This method gets called by the runtime. Use this method to add services to the container.
public void ConfigureServices(IServiceCollection services)
{
services.AddControllers();
services.AddQuartz(cfg =>
{
cfg.UseMicrosoftDependencyInjectionJobFactory(opt =>
{
opt.AllowDefaultConstructor = false;
});
cfg.AddJob<Job1>(jobCfg =>
{
jobCfg.WithIdentity("job1");
});
cfg.AddTrigger(trigger =>
{
trigger
.ForJob("job1")
.WithIdentity("trigger1")
.WithSimpleSchedule(x => x
.WithIntervalInSeconds(10)
.RepeatForever());
});
});
services.AddQuartzHostedService(opt =>
{
opt.WaitForJobsToComplete = true;
});
}
// This method gets called by the runtime. Use this method to configure the HTTP request pipeline.
public void Configure(IApplicationBuilder app, IWebHostEnvironment env)
{
// standard impl
}
}
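For reference, this configuration style comes from the Quartz Microsoft DI integration packages; assuming the current package names (version numbers are only illustrative), the project would reference something like:
<ItemGroup>
<PackageReference Include="Quartz.Extensions.DependencyInjection" Version="3.2.3" />
<PackageReference Include="Quartz.Extensions.Hosting" Version="3.2.3" />
</ItemGroup>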
The accepted answer covers the topic very well, but some things have changed with the latest Quartz version. The following, based on this article, shows a quick start with Quartz 3.0.x and ASP.NET Core 2.2:
Util class
public class QuartzServicesUtilities
{
public static void StartJob<TJob>(IScheduler scheduler, TimeSpan runInterval)
where TJob : IJob
{
var jobName = typeof(TJob).FullName;
var job = JobBuilder.Create<TJob>()
.WithIdentity(jobName)
.Build();
var trigger = TriggerBuilder.Create()
.WithIdentity($"{jobName}.trigger")
.StartNow()
.WithSimpleSchedule(scheduleBuilder =>
scheduleBuilder
.WithInterval(runInterval)
.RepeatForever())
.Build();
scheduler.ScheduleJob(job, trigger);
}
}
Job factory
public class QuartzJobFactory : IJobFactory
{
private readonly IServiceProvider _serviceProvider;
public QuartzJobFactory(IServiceProvider serviceProvider)
{
_serviceProvider = serviceProvider;
}
public IJob NewJob(TriggerFiredBundle bundle, IScheduler scheduler)
{
var jobDetail = bundle.JobDetail;
var job = (IJob)_serviceProvider.GetService(jobDetail.JobType);
return job;
}
public void ReturnJob(IJob job) { }
}
A job sample that also deals with exiting on application pool recycle / exit
[DisallowConcurrentExecution]
public class TestJob : IJob
{
private ILoggingService Logger { get; }
private IApplicationLifetime ApplicationLifetime { get; }
private static object lockHandle = new object();
private static bool shouldExit = false;
public TestJob(ILoggingService loggingService, IApplicationLifetime applicationLifetime)
{
Logger = loggingService;
ApplicationLifetime = applicationLifetime;
}
public Task Execute(IJobExecutionContext context)
{
return Task.Run(() =>
{
ApplicationLifetime.ApplicationStopping.Register(() =>
{
lock (lockHandle)
{
shouldExit = true;
}
});
try
{
for (int i = 0; i < 10; i ++)
{
lock (lockHandle)
{
if (shouldExit)
{
Logger.LogDebug($"TestJob detected that application is shutting down - exiting");
break;
}
}
Logger.LogDebug($"TestJob ran step {i+1}");
Thread.Sleep(3000);
}
}
catch (Exception exc)
{
Logger.LogError(exc, "An error occurred during execution of scheduled job");
}
});
}
}
Startup.cs configuration
private void ConfigureQuartz(IServiceCollection services, params Type[] jobs)
{
services.AddSingleton<IJobFactory, QuartzJobFactory>();
services.Add(jobs.Select(jobType => new ServiceDescriptor(jobType, jobType, ServiceLifetime.Singleton)));
services.AddSingleton(provider =>
{
var schedulerFactory = new StdSchedulerFactory();
var scheduler = schedulerFactory.GetScheduler().Result;
scheduler.JobFactory = provider.GetService<IJobFactory>();
scheduler.Start();
return scheduler;
});
}
protected void ConfigureJobsIoc(IServiceCollection services)
{
ConfigureQuartz(services, typeof(TestJob), /* other jobs come here */);
}
public void ConfigureServices(IServiceCollection services)
{
ConfigureJobsIoc(services);
// other stuff comes here
AddDbContext(services);
AddCors(services);
services
.AddMvc()
.SetCompatibilityVersion(CompatibilityVersion.Version_2_2);
}
protected void StartJobs(IApplicationBuilder app, IApplicationLifetime lifetime)
{
var scheduler = app.ApplicationServices.GetService<IScheduler>();
//TODO: use some config
QuartzServicesUtilities.StartJob<TestJob>(scheduler, TimeSpan.FromSeconds(60));
lifetime.ApplicationStarted.Register(() => scheduler.Start());
lifetime.ApplicationStopping.Register(() => scheduler.Shutdown());
}
public void Configure(IApplicationBuilder app, IHostingEnvironment env, ILoggerFactory loggerFactory,
ILoggingService logger, IApplicationLifetime lifetime)
{
StartJobs(app, lifetime);
// other stuff here
}

Portable Area not finding embedded files MVC4 C#

I have a solution with two projects in it: a portable areas project, and a web site that references the portable areas project; both reference MvcContrib.
The problem I am having is with the embedded resources: they are giving me a 404 error when I try to get to them. It seems like it's trying to access a physical path, not the DLL. The partial view works fine.
The file I'm trying to access looks like this inside my visual studio solution explorer
AdHocReporting/Areas/AdHocReportBuilder/Content/adhoc.css (the build action is Embedded Resource)
Here is the routing for the portable area:
using System.Web.Mvc;
using MvcContrib.PortableAreas;
namespace AdHocReporting.Areas.AdHocReportBuilder
{
public class AdHocReportBuilderAreaRegistration : PortableAreaRegistration
{
public override string AreaName
{
get { return "AdHocReportBuilder"; }
}
public override void RegisterArea(AreaRegistrationContext context, IApplicationBus bus)
{
RegisterRoutes(context);
RegisterAreaEmbeddedResources();
}
private void RegisterRoutes(AreaRegistrationContext context)
{
context.MapRoute(
AreaName + "_content",
base.AreaRoutePrefix + "/Content/{resourceName}",
new { controller = "EmbeddedResource", action = "Index", resourcePath = "Content" },
new[] { "MvcContrib.PortableAreas" }
);
context.MapRoute(
AreaName + "_default",
base.AreaRoutePrefix + "/{controller}/{action}/{id}",
new { action = "Index", id = UrlParameter.Optional }
);
}
}
}
Here is the web site's Global.asax that has the reference to the portable area:
namespace AdHocReportingSite
{
public class MvcApplication : System.Web.HttpApplication
{
public static void RegisterGlobalFilters(GlobalFilterCollection filters)
{
filters.Add(new HandleErrorAttribute());
}
public static void RegisterRoutes(RouteCollection routes)
{
routes.IgnoreRoute("{resource}.axd/{*pathInfo}");
routes.MapRoute(
"Default", // Route name
"{controller}/{action}/{id}", // URL with parameters
new { controller = "Home", action = "Index", id = UrlParameter.Optional } // Parameter defaults
);
}
protected void Application_Start()
{
AreaRegistration.RegisterAllAreas();
PortableAreaRegistration.RegisterEmbeddedViewEngine();
FilterConfig.RegisterGlobalFilters(GlobalFilters.Filters);
RouteConfig.RegisterRoutes(RouteTable.Routes);
}
}
}
Here is an image of what my Solution Explorer looks like (screenshot not included here).
Could not reproduce the issue once I created a new project and redid everything. I'm not sure if it was an error or I just missed a step in setting up my portable areas.
In the Web.config, add an entry for each file type:
<system.webServer>
<handlers>
<add name="js" path="*.js" verb="*" type="System.Web.Handlers.TransferRequestHandler" resourceType="File" preCondition="integratedMode" />
</handlers>
</system.webServer>
This will make IIS try to use the defined routes instead of searching for the static file.
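For the asker's adhoc.css, that presumably means an analogous entry for .css files in the same handlers section (illustrative only):
<add name="css" path="*.css" verb="*" type="System.Web.Handlers.TransferRequestHandler" resourceType="File" preCondition="integratedMode" />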

WCF contract mismatch error using Autofac to register endpoint via ChannelFactory

I have a WCF service that works when accessed by a simple MVC application.
When I try to make call on the same endpoint from a different MVC app that's wired up with Autofac I get a binding/contract mismatch exception like this:
Content Type application/soap+xml;
charset=utf-8 was not supported by service http://localhost:6985/ProductService.svc.
The client and service bindings may be mismatched.
System.Net.WebException: The remote server returned an error: (415) Unsupported Media Type.
I'm reasonably confident I do not have a mismatch in the configuration settings on either end; I base this confidence on testing the exact same settings on a WCF + MVC combination where Autofac is not present. The config settings are on pastebin.com/t7wfR77h.
I therefore would like some help analysing whether the way I have registered the dependency/endpoint with Autofac is the issue...
Application_Start code in MVC app for Autofac setup:
var builder = new ContainerBuilder();
//other registrations...
builder.Register(c =>
new ChannelFactory<IProductService>(
new WSHttpBinding("ProductService_wsHttpBinding"),
new EndpointAddress("http://localhost:6985/ProductService.svc")
)
).SingleInstance();
builder.Register(c =>
{
var factory = c.Resolve<ChannelFactory<IProductService>>();
return factory.CreateChannel();
}
).InstancePerHttpRequest();
var container = builder.Build();
DependencyResolver.SetResolver(new AutofacDependencyResolver(container));
(For completeness) where I make use of this is in a ProductController that has only 1 dependency to be injected, very simple:
public class ProductController : AsyncController
{
private IProductService _service;
public ProductController(IProductService ps)
{
_service = ps;
}
//...
//later simply call
_service.SomeMethod();
}
As mentioned in the comment to @Nick Josevski, I was able to get something similar to work.
In my MVC3 application's Application_Start method, I have the following code:
protected void Application_Start()
{
var builder = new ContainerBuilder();
builder.Register(c => new ChannelFactory<ICartService>("CartService")).SingleInstance();
builder.Register(c => c.Resolve<ChannelFactory<ICartService>>().CreateChannel()).InstancePerHttpRequest();
var container = builder.Build();
DependencyResolver.SetResolver(new AutofacDependencyResolver(container));
// other MVC startup activities, like registering areas and routes
}
These registrations gather the WCF configuration data from Web.config. I've also gotten registrations to work with endpoints defined in code (see the sketch after the config below). For completeness, here are some of the pertinent client-side Web.config entries:
<system.serviceModel>
<bindings>
<basicHttpBinding>
<binding name="BasicHttpBinding" ... />
</basicHttpBinding>
</bindings>
<client>
<endpoint address="http://localhost:50930/Purchasing/CartService.svc"
binding="basicHttpBinding" bindingConfiguration="BasicHttpBinding"
contract="CartService.ICartService" name="CartService" />
</client>
</system.serviceModel>
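For the "endpoints defined in code" variant mentioned above, a minimal sketch mirroring that config (the binding and address are simply the values from the config entries) might be:
builder.Register(c => new ChannelFactory<ICartService>(
new BasicHttpBinding(),
new EndpointAddress("http://localhost:50930/Purchasing/CartService.svc")))
.SingleInstance();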
Then, in my controller, I have code like the following:
using Autofac.Features.OwnedInstances;
public class BulkCartController : Controller
{
private readonly Owned<ICartService> cartService_;
public BulkCartController(Owned<ICartService> cartService)
{
cartService_ = cartService;
}
protected override void Dispose(bool disposing) // defined in Controller
{
cartService_.Dispose();
base.Dispose(disposing);
}
//
// GET: /BulkCart/Get/1
public ActionResult Get(int id)
{
var model = new ShoppingCart { ShoppingCartId = id };
using (var cartService = cartService_)
{
model.Items = cartService.Value.GetCartProductItems(id);
}
return View("Get", model);
}
}
Unit testing looks like this:
using Autofac.Features.OwnedInstances;
using Autofac.Util;
using Moq;
[TestMethod]
public void Get_ReturnsItemsInTheGivenCart()
{
var mock = new Mock<ICartService>(MockBehavior.Strict);
mock.Setup(x => x.GetCartProductItems(2)).Returns(new CartProductItemViewObject[0]);
var controller = new BulkCartController(new Owned<ICartService>(mock.Object, new Autofac.Util.Disposable()));
var result = controller.Get(2);
Assert.IsInstanceOfType(result, typeof(ViewResult));
var view = (ViewResult)result;
Assert.AreEqual("Get", view.ViewName);
Assert.IsInstanceOfType(view.ViewData.Model, typeof(ShoppingCart));
var model = (ShoppingCart)view.ViewData.Model;
Assert.AreEqual(2, model.ShoppingCartId);
Assert.AreEqual(0, model.Items.Length);
}
I validate disposal with a unit test defined in an abstract controller test base class:
[TestClass]
public abstract class ControllerWithServiceTestBase<TController, TService>
where TController : Controller
where TService : class
{
[TestMethod]
public virtual void Dispose_DisposesTheService()
{
var disposable = new Mock<IDisposable>(MockBehavior.Strict);
disposable.Setup(x => x.Dispose()).Verifiable();
var controller = (TController) Activator.CreateInstance(typeof(TController), new Owned<TService>(null, disposable.Object));
controller.Dispose();
disposable.Verify();
}
}
One thing I don't know yet is whether this use of Owned<T> and Dispose() gives me adequate disposal, or whether I'll need to use a LifetimeScope as per An Autofac Lifetime Primer.

Using Ninject WCF Extension with WCF Web Service

I have a WCF web service in which I want to use my repositories and services, which I wish to dependency-inject into the service. However, the Ninject WCF Extension example pretty much has a ctor which instantiates an instance of each dependency, which I don't want; I wanted purer dependency injection.
Has anyone had any success using Ninject with WCF? Google seems to return few relevant results for the topics I am looking for.
The TimeService.svc file has:
<%@ ServiceHost Language="C#" Debug="true" Service="WcfTimeService.TimeService" CodeBehind="TimeService.svc.cs" Factory="Ninject.Extensions.Wcf.NinjectServiceHostFactory" %>
The bastard injection ctors confuse the matter - Ninject will choose the most specific constructor. The general problem with the sample is that it's covering all the bases (IIS hosted, EXE hosted, Service hosted), and WCF doesn't exactly make all this stuff easy to manage either (@Ian Davis: I could easily be wrong; can you provide some more detail please, perhaps in the form of a summary of what the samples illustrate in the README, and perhaps more detail on the why of the various cases where you've used BI?)
The way I am currently using Ninject (v3) with my WCF is based on the Ninject WCF extension and Pieter De Rycke's great blog post.
In a nutshell, here's what I'm doing:
1) Via NuGet, I've added a reference to Ninject.Extensions.Wcf into my WCF project. This creates the App_Start folder with NinjectWebCommon.cs, which takes care of initializing Ninject.
2) Typically, you'd set up your Ninject mappings in the CreateKernel method in NinjectWebCommon.cs. However, since I have a MVC3 site in the same solution and want the same Ninject mappings for that site, my CreateKernel looks like this:
private static IKernel CreateKernel()
{
var kernel = new StandardKernel();
kernel.Bind<Func<IKernel>>().ToMethod(ctx => () => new Bootstrapper().Kernel);
kernel.Bind<IHttpModule>().To<HttpApplicationInitializationHttpModule>();
InfrastructureSetup.RegisterServices(kernel);
return kernel;
}
3) In InfrastructureSetup.RegisterServices, I have my Ninject mappings:
public static class InfrastructureSetup
{
public static void RegisterServices(IKernel kernel)
{
kernel.Bind<IRepositoryContext>().To<MyEntityFrameworkContext>().InRequestScope();
kernel.Bind<IFooRepository>().To<FooRepository>().InRequestScope();
kernel.Bind<IBarRepository>().To<BarRepository>().InRequestScope();
// ... and so on. I want InRequestScope() for the EF context, since
// otherwise my repositories (which take IRepositoryContext in their
// constructors) end up getting different EF contexts, messing things up
}
}
4) I also want to inject stuff (IFooService etc.) into my WCF constructors, so I've edited the Web.config for the WCF project following the advice from Pieter De Rycke:
<behaviors>
<serviceBehaviors>
<behavior name="">
<serviceMetadata httpGetEnabled="true" />
<serviceDebug includeExceptionDetailInFaults="false" />
<!-- Add the Ninject behavior to the WCF service. This is needed to support dependency injection to the WCF constructors -->
<ninject />
</behavior>
</serviceBehaviors>
</behaviors>
<extensions>
<behaviorExtensions>
<!-- Add the Ninject behavior extension -->
<add name="ninject"
type="MyWCFProject.Infrastructure.NinjectBehaviorExtensionElement, MyWCFProject, Version=1.0.0.0, Culture=neutral, PublicKeyToken=null" />
</behaviorExtensions>
</extensions>
5) In the MyWCFProject.Infrastructure namespace, I have three files which are basically copy-paste from Pieter:
NinjectBehaviorAttribute.cs:
using System;
using System.Collections.ObjectModel;
using System.ServiceModel;
using System.ServiceModel.Channels;
using System.ServiceModel.Description;
using System.ServiceModel.Dispatcher;
using Ninject.Web.Common;
namespace MyWCFProject.Infrastructure
{
public class NinjectBehaviorAttribute : Attribute, IServiceBehavior
{
public void AddBindingParameters(ServiceDescription serviceDescription, ServiceHostBase serviceHostBase,
Collection<ServiceEndpoint> endpoints, BindingParameterCollection bindingParameters)
{
}
public void ApplyDispatchBehavior(ServiceDescription serviceDescription, ServiceHostBase serviceHostBase)
{
Type serviceType = serviceDescription.ServiceType;
// Set up Ninject to support injecting to WCF constructors
var kernel = new Bootstrapper().Kernel;
IInstanceProvider instanceProvider = new NinjectInstanceProvider(kernel, serviceType);
foreach (ChannelDispatcher dispatcher in serviceHostBase.ChannelDispatchers)
{
foreach (EndpointDispatcher endpointDispatcher in dispatcher.Endpoints)
{
DispatchRuntime dispatchRuntime = endpointDispatcher.DispatchRuntime;
dispatchRuntime.InstanceProvider = instanceProvider;
}
}
}
public void Validate(ServiceDescription serviceDescription, ServiceHostBase serviceHostBase)
{
}
}
}
NinjectBehaviorExtensionElement.cs:
using System;
using System.ServiceModel.Configuration;
namespace MyWCFProject.Infrastructure
{
public class NinjectBehaviorExtensionElement : BehaviorExtensionElement
{
public override Type BehaviorType
{
get { return typeof(NinjectBehaviorAttribute); }
}
protected override object CreateBehavior()
{
return new NinjectBehaviorAttribute();
}
}
}
NinjectInstanceProvider.cs:
using System;
using System.ServiceModel;
using System.ServiceModel.Channels;
using System.ServiceModel.Dispatcher;
using Ninject;
namespace MyWCFProject.Infrastructure
{
public class NinjectInstanceProvider : IInstanceProvider
{
private Type serviceType;
private IKernel kernel;
public NinjectInstanceProvider(IKernel kernel, Type serviceType)
{
this.kernel = kernel;
this.serviceType = serviceType;
}
public object GetInstance(InstanceContext instanceContext)
{
return this.GetInstance(instanceContext, null);
}
public object GetInstance(InstanceContext instanceContext, Message message)
{
return kernel.Get(this.serviceType);
}
public void ReleaseInstance(InstanceContext instanceContext, object instance)
{
}
}
}
At the moment, this solution seems to be working well; dependency injection is working for both the WCF and the MVC3 site, I can request dependencies to be injected to the WCF constructors and the EF context stays around for the duration of the request.
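To illustrate that last point, with the <ninject /> behavior in place a WCF service can take its dependencies straight in its constructor; a hypothetical service (names invented) would look like:
public class OrderService : IOrderService
{
private readonly IFooRepository _fooRepository;
// NinjectInstanceProvider resolves the service via kernel.Get(serviceType),
// so the kernel supplies IFooRepository from the bindings registered above
public OrderService(IFooRepository fooRepository)
{
_fooRepository = fooRepository;
}
public string Ping()
{
return "pong";
}
}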