├── .gitmodules
├── CNAME
├── conceptual
├── EF6.PG
│ ├── toc.md
│ └── index.md
├── Npgsql
│ ├── contributing.md
│ ├── diagnostics
│ │ ├── overview.md
│ │ ├── exceptions_notices.md
│ │ ├── metrics.md
│ │ ├── tracing.md
│ │ └── logging.md
│ ├── dev
│ │ ├── tests.md
│ │ └── release-checklist.md
│ ├── index.md
│ ├── large-objects.md
│ ├── wait.md
│ ├── keepalive.md
│ ├── toc.yml
│ ├── installation.md
│ ├── compatibility.md
│ ├── copy.md
│ ├── types
│ │ ├── geojson.md
│ │ ├── nts.md
│ │ └── enums_and_composites.md
│ ├── faq.md
│ ├── performance.md
│ ├── release-notes
│ │ ├── 3.2.md
│ │ └── 3.1.md
│ └── replication.md
└── EFCore.PG
│ ├── mapping
│ ├── general.md
│ ├── enum.md
│ ├── array.md
│ └── range.md
│ ├── toc.yml
│ ├── modeling
│ ├── concurrency.md
│ ├── tables.md
│ └── indexes.md
│ ├── misc
│ ├── database-creation.md
│ ├── other.md
│ └── collations-and-case-sensitivity.md
│ └── release-notes
│ ├── 2.0.md
│ ├── 1.1.md
│ ├── 2.2.md
│ ├── 9.0.md
│ └── 10.0.md
├── img
├── warning.png
├── zipkin.png
├── postgresql.gif
├── logo.svg
└── jetbrains-logo.svg
├── .gitignore
├── favicons
├── favicon.ico
├── favicon-16x16.png
├── favicon-32x32.png
├── mstile-70x70.png
├── mstile-144x144.png
├── mstile-150x150.png
├── mstile-310x150.png
├── mstile-310x310.png
├── apple-touch-icon.png
├── android-chrome-192x192.png
├── android-chrome-512x512.png
├── browserconfig.xml
├── site.webmanifest
└── safari-pinned-tab.svg
├── dev
├── toc.md
├── tests.md
├── index.md
└── build-server.md
├── .github
├── dependabot.yml
└── workflows
│ ├── markdownlint-problem-matcher.json
│ └── build-documentation.yml
├── toc.yml
├── .markdownlint.json
├── README.md
├── .vscode
└── settings.json
├── index.md
├── docfx.json
└── static
└── LegacyDateAndTimeResolverFactory.cs
/.gitmodules:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/CNAME:
--------------------------------------------------------------------------------
1 | www.npgsql.org
--------------------------------------------------------------------------------
/conceptual/EF6.PG/toc.md:
--------------------------------------------------------------------------------
1 | # [Getting Started](index.md)
2 |
--------------------------------------------------------------------------------
/img/warning.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/npgsql/doc/HEAD/img/warning.png
--------------------------------------------------------------------------------
/img/zipkin.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/npgsql/doc/HEAD/img/zipkin.png
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | obj
2 | /EFCore.PG
3 | /Npgsql
4 | /_site
5 | /_exported_templates
6 |
--------------------------------------------------------------------------------
/img/postgresql.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/npgsql/doc/HEAD/img/postgresql.gif
--------------------------------------------------------------------------------
/favicons/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/npgsql/doc/HEAD/favicons/favicon.ico
--------------------------------------------------------------------------------
/favicons/favicon-16x16.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/npgsql/doc/HEAD/favicons/favicon-16x16.png
--------------------------------------------------------------------------------
/favicons/favicon-32x32.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/npgsql/doc/HEAD/favicons/favicon-32x32.png
--------------------------------------------------------------------------------
/favicons/mstile-70x70.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/npgsql/doc/HEAD/favicons/mstile-70x70.png
--------------------------------------------------------------------------------
/favicons/mstile-144x144.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/npgsql/doc/HEAD/favicons/mstile-144x144.png
--------------------------------------------------------------------------------
/favicons/mstile-150x150.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/npgsql/doc/HEAD/favicons/mstile-150x150.png
--------------------------------------------------------------------------------
/favicons/mstile-310x150.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/npgsql/doc/HEAD/favicons/mstile-310x150.png
--------------------------------------------------------------------------------
/favicons/mstile-310x310.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/npgsql/doc/HEAD/favicons/mstile-310x310.png
--------------------------------------------------------------------------------
/favicons/apple-touch-icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/npgsql/doc/HEAD/favicons/apple-touch-icon.png
--------------------------------------------------------------------------------
/favicons/android-chrome-192x192.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/npgsql/doc/HEAD/favicons/android-chrome-192x192.png
--------------------------------------------------------------------------------
/favicons/android-chrome-512x512.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/npgsql/doc/HEAD/favicons/android-chrome-512x512.png
--------------------------------------------------------------------------------
/dev/toc.md:
--------------------------------------------------------------------------------
1 | # [General](index.md)
2 | # [Build Server](build-server.md)
3 | # [Tests](tests.md)
4 | # [Types](types.md)
5 |
--------------------------------------------------------------------------------
/.github/dependabot.yml:
--------------------------------------------------------------------------------
1 | version: 2
2 | updates:
3 | - package-ecosystem: "github-actions"
4 | directory: "/"
5 | schedule:
6 | interval: "daily"
7 |
--------------------------------------------------------------------------------
/toc.yml:
--------------------------------------------------------------------------------
1 | - name: Home
2 | href: index.md
3 | - name: Npgsql
4 | href: conceptual/Npgsql/
5 | - name: Entity Framework Core
6 | href: conceptual/EFCore.PG/
7 | - name: Entity Framework 6
8 | href: conceptual/EF6.PG/
9 |
--------------------------------------------------------------------------------
/conceptual/Npgsql/contributing.md:
--------------------------------------------------------------------------------
1 | # Contributing to Npgsql
2 |
3 | As a general rule, Npgsql makes no attempt to validate what it sends to PostgreSQL. For all cases where PostgreSQL would simply return a reasonable error, we prefer that to happen rather than replicating validation checks client-side.
4 |
--------------------------------------------------------------------------------
/.markdownlint.json:
--------------------------------------------------------------------------------
1 | {
2 | "default": true,
3 | "MD011": false,
4 | "MD013": false,
5 | "MD024": false,
6 | "MD028": false,
7 | "MD033": { "allowed_elements": ["del", "a", "sup"] },
8 | "MD051": false,
9 | "MD046": {
10 | "style": "fenced"
11 | },
12 | "MD055": false,
13 | "MD056": false,
14 | "MD059": false,
15 | "MD060": false
16 | }
17 |
--------------------------------------------------------------------------------
/favicons/browserconfig.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 | #2b5797
9 |
10 |
11 |
12 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | This is the documentation repo for Npgsql.
2 |
3 | It contains conceptual documentation articles for Npgsql, Npgsql.EntityFrameworkCore.PostgreSQL (AKA EFCore.PG) and EntityFramework6.Npgsql (AKA EF6.PG).
4 |
5 | Note that to properly work, docfx expects to also find the Npgsql and EFCore.PG repos cloned in the repo root - it extracts API documentation from them.
6 |
7 | A Github Actions workflow automatically clones the appropriate repository, rebuilds the entire documentation and pushes the results to live.
8 |
--------------------------------------------------------------------------------
/.github/workflows/markdownlint-problem-matcher.json:
--------------------------------------------------------------------------------
1 | {
2 | "problemMatcher": [
3 | {
4 | "owner": "markdownlint",
5 | "pattern": [
6 | {
7 | "regexp": "^([^:]*):(\\d+):?(\\d+)?\\s([\\w-\\/]*)\\s(.*)$",
8 | "file": 1,
9 | "line": 2,
10 | "column": 3,
11 | "code": 4,
12 | "message": 5
13 | }
14 | ]
15 | }
16 | ]
17 | }
18 |
--------------------------------------------------------------------------------
/favicons/site.webmanifest:
--------------------------------------------------------------------------------
1 | {
2 | "name": "Npgsql Documentation",
3 | "short_name": "Npgsql docs",
4 | "icons": [
5 | {
6 | "src": "/android-chrome-192x192.png",
7 | "sizes": "192x192",
8 | "type": "image/png"
9 | },
10 | {
11 | "src": "/android-chrome-512x512.png",
12 | "sizes": "512x512",
13 | "type": "image/png"
14 | }
15 | ],
16 | "theme_color": "#ffffff",
17 | "background_color": "#ffffff",
18 | "display": "standalone"
19 | }
20 |
--------------------------------------------------------------------------------
/conceptual/Npgsql/diagnostics/overview.md:
--------------------------------------------------------------------------------
1 | # Diagnostics overview
2 |
3 | Npgsql provides several ways to analyze what's going on inside Npgsql and to diagnose performance issues. Each has its own dedicated doc page:
4 |
5 | * [**Tracing**](tracing.md) allows collecting information on which queries are executed, including precise timing information on start, end and duration. These events can be collected in a database, searched, graphically explored and otherwise analyzed.
6 | * [**Logging**](logging.md) generates textual information on various events within Npgsql; log levels can be adjusted to collect low-level information, helpful for diagnosing errors.
7 | * [**Metrics**](metrics.md) generates aggregated quantitative data, useful for tracking the performance of your application in realtime and over time (e.g. how many queries are currently being executed in a particular moment).
8 |
9 | For information on the exceptions thrown by Npgsql, and on notices produced by PostgreSQL, [see this page](exceptions_notices.md).
10 |
--------------------------------------------------------------------------------
/.vscode/settings.json:
--------------------------------------------------------------------------------
1 | {
2 | "cSpell.words": [
3 | "Keepalive",
4 | "as",
5 | "citext",
6 | "concurrency",
7 | "daterange",
8 | "enum",
9 | "floatrange",
10 | "has",
11 | "has postgres enum",
12 | "hstore",
13 | "ilike",
14 | "inet",
15 | "jsonb",
16 | "jsonb typeof",
17 | "keepalives",
18 | "linq",
19 | "macaddr",
20 | "nextval",
21 | "noda",
22 | "noda time",
23 | "npgsql",
24 | "nuget",
25 | "numrange",
26 | "postgis",
27 | "postgres",
28 | "postgresql",
29 | "regconfig",
30 | "rowversion",
31 | "setweight",
32 | "tablespace",
33 | "tablespaces",
34 | "time",
35 | "timetz",
36 | "to",
37 | "to tsquery",
38 | "to tsvector",
39 | "token",
40 | "trunc",
41 | "tsquery",
42 | "tsrange",
43 | "tstzrange",
44 | "tsvector",
45 | "typeof",
46 | "use",
47 | "use xmin as concurrency token",
48 | "xmin"
49 | ]
50 | }
--------------------------------------------------------------------------------
/dev/tests.md:
--------------------------------------------------------------------------------
1 | ---
2 | layout: page
3 | title: Tests
4 | ---
5 |
6 | ## Overview
7 |
8 | Npgsql comes with an extensive test suite to make sure no regressions occur. All tests are run on our build server on all supported .NET versions (including a recent version of mono) and all supported PostgreSQL backends.
9 |
10 | There is also a growing suite of speed tests to be able to measure performance. These tests are currently marked [Explicit] and aren't executed automatically.
11 |
12 | ## Simple setup
13 |
14 | The Npgsql test suite requires a PostgreSQL backend to test against. Simply use the latest version of PostgreSQL on your dev machine on the default port (5432).
15 | By default, all tests will be run using user *npgsql_tests*, and password *npgsql_tests*. Npgsql will automatically create a database called *npgsql_tests* and
16 | run its tests against this.
17 |
18 | To set this up, connect to PostgreSQL as the admin user as follows:
19 |
20 | ```
21 | psql -h localhost -U postgres
22 |
23 | create user npgsql_tests password 'npgsql_tests' superuser;
24 | ```
25 |
26 | And you're done.
27 |
28 | Superuser access is needed for some tests, e.g. loading the `hstore` extension, creating and dropping test databases in the Entity Framework tests...
29 |
--------------------------------------------------------------------------------
/dev/index.md:
--------------------------------------------------------------------------------
1 | ## Tests
2 |
3 | We maintain a large regression test suite, if you're planning to submit code, please provide a test
4 | that reproduces the bug or tests your new feature. See [this page](tests.md) for information on the
5 | Npgsql test suite.
6 |
7 | ## Build Server
8 |
9 | We have a [TeamCity build server](https://www.jetbrains.com/teamcity/) running continuous integration builds
10 | on commits pushed to our github repository. The Npgsql testsuite is executed over all officially supported
11 | PostgreSQL versions to catch errors as early as possible. CI NuGet packages are automatically pushed to our
12 | [unstable feed at MyGet](https://www.myget.org/F/npgsql-unstable).
13 |
14 | For some information about the build server setup, see [this page](build-server.md).
15 |
16 | Thanks to Dave Page at PostgreSQL for donating a VM for this!
17 |
18 | ## Release Checklist
19 |
20 | These are the steps needed to publish release 3.0.6:
21 |
22 | * Merge --no-ff hotfix/3.0.6 into master
23 | * Tag master with v3.0.6
24 | * Push both master and v3.0.6 to Github
25 | * Wait for the build to complete
26 | * In TeamCity, go to the artifacts for the build and download them all as a single ZIP
27 | * Nuget push the packages
28 | * Write release notes on npgsql.org, publish
29 | * Create release on github, pointing to npgsql.org
30 | * Upload MSI to the github release
31 | * Delete hotfix/3.0.6 both locally and on github
32 | * Create new branch hotfix/3.0.7 off of master, push to github
33 | * Close the Github 3.0.6 milestone, create new 3.0.7 milestone
34 | * Twitter
35 |
36 | ## Other stuff
37 |
38 | Emil compiled [a list of PostgreSQL types and their wire representations](types.md).
39 |
40 |
--------------------------------------------------------------------------------
/conceptual/Npgsql/dev/tests.md:
--------------------------------------------------------------------------------
1 | ---
2 | layout: page
3 | title: Tests
4 | ---
5 |
6 | ## Overview
7 |
8 | Npgsql has an extensive test suite to guard against regressions. The test suite is run on the official build server for the .NET Framework and .NET Core with all supported PostgreSQL backends.
9 |
10 | Continuous integration results are publicly available via [Github Actions](https://github.com/npgsql/npgsql/actions).
11 |
12 | ## Getting Started
13 |
14 | ### Setup PostgreSQL
15 |
16 | The Npgsql test suite requires a PostgreSQL backend for tests to run. By default, the test suite expects PostgreSQL to be running on the local machine with the default port (5432).
17 |
18 | 1. Install PostgreSQL.
19 | 2. Start the PostgreSQL backend.
20 |
21 | ### Create the `npgsql_tests` account
22 |
23 | By default, the test suite expects an account named `npgsql_tests` with a password of `npgsql_tests`. This account is used by the test suite to create a database named `npgsql_tests` and run the tests.
24 |
25 | ```bash
26 | $ psql -h localhost -U postgres
27 | postgres=# CREATE USER npgsql_tests PASSWORD 'npgsql_tests' SUPERUSER;
28 | postgres=# CREATE DATABASE npgsql_tests OWNER npgsql_tests;
29 | ```
30 |
31 | _Note: superuser access is required to create and drop test databases, load extensions (e.g. `hstore`, `postgis`), etc._
32 |
33 | ### Clone the repository
34 |
35 | ```bash
36 | cd ~
37 | git clone git@github.com:npgsql/npgsql.git (use ssh)
38 | git clone https://github.com/npgsql/npgsql.git (use https)
39 | ```
40 |
41 | ### Run the test suite
42 |
43 | ```bash
44 | cd ~/npgsql
45 | dotnet test ./test/Npgsql.Tests
46 | dotnet test ./test/Npgsql.PluginTests
47 | dotnet test ./test/Npgsql.Benchmarks
48 | ```
49 |
--------------------------------------------------------------------------------
/conceptual/Npgsql/index.md:
--------------------------------------------------------------------------------
1 | # Getting Started
2 |
3 | [](https://www.nuget.org/packages/Npgsql/)
4 | [](https://www.myget.org/feed/npgsql/package/nuget/Npgsql)
5 | [](https://www.myget.org/feed/npgsql-vnext/package/nuget/Npgsql)
6 | [](https://github.com/npgsql/npgsql/actions)
7 |
8 | The best way to use Npgsql is to install its [nuget package](https://www.nuget.org/packages/Npgsql/).
9 |
10 | Npgsql aims to be fully ADO.NET-compatible; its API should feel almost identical to other .NET database drivers.
11 |
12 | Here's a basic code snippet to get you started:
13 |
14 | ```csharp
15 | var connectionString = "Host=myserver;Username=mylogin;Password=mypass;Database=mydatabase";
16 | await using var dataSource = NpgsqlDataSource.Create(connectionString);
17 |
18 | // Insert some data
19 | await using (var cmd = dataSource.CreateCommand("INSERT INTO data (some_field) VALUES ($1)"))
20 | {
21 | cmd.Parameters.AddWithValue("Hello world");
22 | await cmd.ExecuteNonQueryAsync();
23 | }
24 |
25 | // Retrieve all rows
26 | await using (var cmd = dataSource.CreateCommand("SELECT some_field FROM data"))
27 | await using (var reader = await cmd.ExecuteReaderAsync())
28 | {
29 | while (await reader.ReadAsync())
30 | {
31 | Console.WriteLine(reader.GetString(0));
32 | }
33 | }
34 | ```
35 |
36 | You can find more info about the ADO.NET API in the [MSDN docs](https://msdn.microsoft.com/en-us/library/h43ks021(v=vs.110).aspx) or in many tutorials on the Internet.
37 |
--------------------------------------------------------------------------------
/conceptual/EFCore.PG/mapping/general.md:
--------------------------------------------------------------------------------
1 | # Type mapping
2 |
3 | The EF Core provider transparently maps the types supported by Npgsql at the ADO.NET level - see [the Npgsql ADO type mapping page](/doc/types/basic.html).
4 |
5 | This means that you can use PostgreSQL-specific types, such as `inet` or `circle`, directly in your entities. Simply define your properties just as if they were a simple type, such as a `string`:
6 |
7 | ```csharp
8 | public class MyEntity
9 | {
10 | public int Id { get; set; }
11 | public string Name { get; set; }
12 | public IPAddress IPAddress { get; set; }
13 | public NpgsqlCircle Circle { get; set; }
14 | public int[] SomeInts { get; set; }
15 | }
16 | ```
17 |
18 | Special types such as [arrays](array.md) and [enums](enum.md) have their own documentation pages with more details.
19 |
20 | [PostgreSQL composite types](https://www.postgresql.org/docs/current/static/rowtypes.html), while supported at the ADO.NET level, aren't yet supported in the EF Core provider. This is tracked by [#22](https://github.com/npgsql/Npgsql.EntityFrameworkCore.PostgreSQL/issues/22).
21 |
22 | ## Explicitly specifying data types
23 |
24 | In some cases, your .NET property type can be mapped to several PostgreSQL data types; a good example is a `string`, which will be mapped to `text` by default, but can also be mapped to `jsonb`. You can use either Data Annotation attributes or the Fluent API to configure the PostgreSQL data type:
25 |
26 | ## [Data Annotations](#tab/data-annotations)
27 |
28 | ```csharp
29 | [Column(TypeName="jsonb")]
30 | public string SomeStringProperty { get; set; }
31 | ```
32 |
33 | ## [Fluent API](#tab/fluent-api)
34 |
35 | ```csharp
36 | builder.Entity()
37 | .Property(b => b.SomeStringProperty)
38 | .HasColumnType("jsonb");
39 | ```
40 |
41 | ***
42 |
--------------------------------------------------------------------------------
/conceptual/Npgsql/large-objects.md:
--------------------------------------------------------------------------------
1 | # Large Objects
2 |
3 | The Large Objects feature is a way of storing large files in a PostgreSQL database. Files can normally be stored in bytea columns, but there are two downsides: a file can only be 1 GB, and the backend buffers the whole file when reading or writing a column, which may use significant amounts of RAM on the backend.
4 |
5 | With the Large Objects feature, objects are instead stored in a separate system table in smaller chunks, and a streaming API is provided for the user. Each object is given an integral identifier that is used for accessing the object and that can, for example, be stored in a user's table containing information about this object.
6 |
7 | ## Example
8 |
9 | ```csharp
10 | // Retrieve a Large Object Manager for this connection
11 | var manager = new NpgsqlLargeObjectManager(Conn);
12 |
13 | // Create a new empty file, returning the identifier to later access it
14 | uint oid = manager.Create();
15 |
16 | // Reading and writing Large Objects requires the use of a transaction
17 | using (var transaction = Conn.BeginTransaction())
18 | {
19 | // Open the file for reading and writing
20 | using (var stream = manager.OpenReadWrite(oid))
21 | {
22 | var buf = new byte[] { 1, 2, 3 };
23 | stream.Write(buf, 0, buf.Length);
24 | stream.Seek(0, System.IO.SeekOrigin.Begin);
25 |
26 | var buf2 = new byte[buf.Length];
27 | stream.Read(buf2, 0, buf2.Length);
28 |
29 | // buf2 now contains 1, 2, 3
30 | }
31 | // Save the changes to the object
32 | transaction.Commit();
33 | }
34 | ```
35 |
36 | ## See also
37 |
38 | See the [PostgreSQL documentation](http://www.postgresql.org/docs/current/static/largeobjects.html) for more information. All functionality is implemented and wrapped in the classes `NpgsqlLargeObjectManager` and `NpgsqlLargeObjectStream`, using the standard .NET Stream as a base class.
39 |
--------------------------------------------------------------------------------
/conceptual/EFCore.PG/toc.yml:
--------------------------------------------------------------------------------
1 | - name: Getting started
2 | href: index.md
3 | - name: Release notes
4 | items:
5 | - name: "10.0 (rc)"
6 | href: release-notes/10.0.md
7 | - name: "9.0"
8 | href: release-notes/9.0.md
9 | - name: "8.0"
10 | href: release-notes/8.0.md
11 | - name: Out of support
12 | items:
13 | - name: "7.0"
14 | href: release-notes/7.0.md
15 | - name: "6.0"
16 | href: release-notes/6.0.md
17 | - name: "5.0"
18 | href: release-notes/5.0.md
19 | - name: "3.1"
20 | href: release-notes/3.1.md
21 | - name: "2.2"
22 | href: release-notes/2.2.md
23 | - name: "2.1"
24 | href: release-notes/2.1.md
25 | - name: "2.0"
26 | href: release-notes/2.0.md
27 | - name: "1.1"
28 | href: release-notes/1.1.md
29 | - name: Creating a model
30 | items:
31 | - name: Tables
32 | href: modeling/tables.md
33 | - name: Generated properties
34 | href: modeling/generated-properties.md
35 | - name: Indexes
36 | href: modeling/indexes.md
37 | - name: Concurrency tokens
38 | href: modeling/concurrency.md
39 | - name: Mapping and translation
40 | items:
41 | - name: General
42 | href: mapping/general.md
43 | - name: Translations
44 | href: mapping/translations.md
45 | - name: JSON
46 | href: mapping/json.md
47 | - name: Spatial data
48 | href: mapping/nts.md
49 | - name: Full text search
50 | href: mapping/full-text-search.md
51 | - name: NodaTime
52 | href: mapping/nodatime.md
53 | - name: Arrays
54 | href: mapping/array.md
55 | - name: Enums
56 | href: mapping/enum.md
57 | - name: Ranges
58 | href: mapping/range.md
59 | - name: Miscellaneous
60 | items:
61 | - name: Collations and case sensitivity
62 | href: misc/collations-and-case-sensitivity.md
63 | - name: Database creation
64 | href: misc/database-creation.md
65 | - name: Other
66 | href: misc/other.md
67 | - name: API reference
68 | href: "../../obj/api/EFCore.PG/"
69 |
--------------------------------------------------------------------------------
/conceptual/EFCore.PG/modeling/concurrency.md:
--------------------------------------------------------------------------------
1 | # Concurrency Tokens
2 |
3 | > [!NOTE]
4 | > Please read the general [Entity Framework Core docs on concurrency tokens](https://docs.microsoft.com/en-us/ef/core/modeling/concurrency).
5 |
6 | Entity Framework Core supports the concept of optimistic concurrency - a property on your entity is designated as a concurrency token, and EF Core detects concurrent modifications by checking whether that token has changed since the entity was read.
7 |
8 | ## The PostgreSQL xmin system column
9 |
10 | Although applications can update concurrency tokens themselves, we frequently rely on the database automatically updating a column on update - a "last modified" timestamp, an SQL Server `rowversion`, etc. Unfortunately PostgreSQL doesn't have such auto-updating columns - but there is one feature that can be used for concurrency token. All PostgreSQL tables have a set of [implicit and hidden system columns](https://www.postgresql.org/docs/current/ddl-system-columns.html), among which `xmin` holds the ID of the latest updating transaction. Since this value automatically gets updated every time the row is changed, it is ideal for use as a concurrency token.
11 |
12 | You can map a `uint` property to the PostgreSQL `xmin` system column using the standard EF Core mechanisms:
13 |
14 | ### [Data Annotations](#tab/data-annotations)
15 |
16 | ```csharp
17 | public class SomeEntity
18 | {
19 | public int Id { get; set; }
20 |
21 | [Timestamp]
22 | public uint Version { get; set; }
23 | }
24 | ```
25 |
26 | ### [Fluent API](#tab/fluent-api)
27 |
28 | ```csharp
29 | class MyContext : DbContext
30 | {
31 | public DbSet SomeEntities { get; set; }
32 |
33 | protected override void OnModelCreating(ModelBuilder modelBuilder)
34 | {
35 | modelBuilder.Entity()
36 | .Property(b => b.Version)
37 | .IsRowVersion();
38 | }
39 | }
40 |
41 | public class SomeEntity
42 | {
43 | public int Id { get; set; }
44 | public uint Version { get; set; }
45 | }
46 | ```
47 |
48 | ***
49 |
--------------------------------------------------------------------------------
/index.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 | # Npgsql - .NET Access to PostgreSQL
8 |
9 | ## About
10 |
11 | Npgsql is an open source ADO.NET Data Provider for PostgreSQL; it allows programs written in C#, Visual Basic, and F# to access the PostgreSQL database server.
12 | It is implemented in 100% C# code, is free and is open source.
13 |
14 | An Entity Framework Core provider is also available, and exposes some features unique to the PostgreSQL database to EF Core users.
15 |
16 | Finally, a legacy Entity Framework 6.x (non-Core) provider is also available, but is no longer being actively maintained.
17 |
18 | ## Getting Help
19 |
20 | The best way to get help for Npgsql is to post a question to Stack Overflow and tag it with the `npgsql` tag.
21 | If you think you've encountered a bug or want to request a feature, open an issue in the [appropriate project's github repository](https://github.com/npgsql).
22 |
23 | ## License
24 |
25 | Npgsql is licensed under the [PostgreSQL License](https://github.com/npgsql/npgsql/blob/main/LICENSE), a liberal OSI-approved open source license.
26 |
27 | ## Contributors
28 |
29 | Current active contributors to Npgsql are:
30 |
31 | * Shay Rojansky ([@roji](https://github.com/roji))
32 | * Nikita Kazmin ([@vonzshik](https://github.com/vonzshik))
33 | * Nino Floris ([@NinoFloris](https://github.com/NinoFloris/))
34 | * Brar Piening ([@Brar](https://github.com/Brar))
35 |
36 | Past contributors to Npgsql:
37 |
38 | * Emmanuel André
39 | * Jon Asher
40 | * Raif Atef
41 | * Josh Cooley
42 | * Yoh Deadfall
43 | * Austin Drenski
44 | * Francisco Figueiredo Jr. ([@franciscojunior](https://github.com/franciscojunior)) Npgsql creator and former lead developer for versions 0.xx, 1.xx and 2.xx
45 | * Federico Di Gregorio
46 | * Jon Hanna
47 | * Emil Lenngren
48 | * Chris Morgan
49 | * Dave Page
50 | * Glen Parker
51 | * Hiroshi Saito
52 | * Kenji Uno
53 | * Warcha
54 |
55 | ## Thanks
56 |
57 | A special thanks to [JetBrains](http://jetbrains.com/) for donating licenses to the project.
58 |
--------------------------------------------------------------------------------
/conceptual/EFCore.PG/misc/database-creation.md:
--------------------------------------------------------------------------------
1 | # Database Creation
2 |
3 | ## Specifying the administrative db
4 |
5 | When the Npgsql EF Core provider creates or deletes a database (`EnsureCreated()`, `EnsureDeleted()`), it must connect to an administrative database which already exists (with PostgreSQL you always have to be connected to some database, even when creating/deleting another database). Up to now the `postgres` database was used, which is supposed to always be present.
6 |
7 | However, there are some PostgreSQL-like databases where the `postgres` database is not available. For these cases you can specify the administrative database as follows:
8 |
9 | ```csharp
10 | protected override void OnConfiguring(DbContextOptionsBuilder optionsBuilder)
11 | => optionsBuilder.UseNpgsql(
12 | "",
13 | options => options.UseAdminDatabase("my_admin_db"));
14 | ```
15 |
16 | ## Using a database template
17 |
18 | When creating a new database,
19 | [PostgreSQL allows specifying another "template database"](http://www.postgresql.org/docs/current/static/manage-ag-templatedbs.html)
20 | which will be copied as the basis for the new one. This can be useful for including database entities which are not managed by Entity Framework Core. You can trigger this by using `UseDatabaseTemplate` in your context's `OnModelCreating`:
21 |
22 | ```csharp
23 | protected override void OnModelCreating(ModelBuilder modelBuilder)
24 | => modelBuilder.UseDatabaseTemplate("my_template_db");
25 | ```
26 |
27 | ## Setting a tablespace
28 |
29 | PostgreSQL allows you to locate your database in different parts of your filesystem, [via tablespaces](https://www.postgresql.org/docs/current/static/manage-ag-tablespaces.html). The Npgsql EF Core provider allows you to specify your database's tablespace:
30 |
31 | ```csharp
32 | protected override void OnModelCreating(ModelBuilder modelBuilder)
33 | => modelBuilder.UseTablespace("my_tablespace");
34 | ```
35 |
36 | You must have created your tablespace prior to this via the `CREATE TABLESPACE` command - the Npgsql EF Core provider does not do this for you. Note also that specifying a tablespace on specific tables is not supported.
37 |
--------------------------------------------------------------------------------
/conceptual/EFCore.PG/modeling/tables.md:
--------------------------------------------------------------------------------
1 | # Tables
2 |
3 | ## Naming
4 |
5 | By default, EF Core will map to tables and columns named exactly after your .NET classes and properties, so an entity type named `BlogPost` will be mapped to a PostgreSQL table called `BlogPost`. While there's nothing wrong with that, the PostgreSQL world tends towards snake_case naming instead. In addition, any upper-case letters in unquoted identifiers are automatically converted to lower-case identifiers, so the Npgsql provider generates quotes around all such identifiers.
6 |
7 | You can use the [EFCore.NamingConventions](https://github.com/efcore/EFCore.NamingConventions) plugin to automatically set all your table and column names to snake_case instead:
8 |
9 | ```csharp
10 | protected override void OnConfiguring(DbContextOptionsBuilder optionsBuilder)
11 | => optionsBuilder
12 | .UseNpgsql(...)
13 | .UseSnakeCaseNamingConvention();
14 |
15 | public class Customer {
16 | public int Id { get; set; }
17 | public string FullName { get; set; }
18 | }
19 | ```
20 |
21 | This will cause cleaner SQL such as the following to be generated:
22 |
23 | ```sql
24 | CREATE TABLE customers (
25 | id integer NOT NULL GENERATED BY DEFAULT AS IDENTITY,
26 | full_name text NULL,
27 |     CONSTRAINT "PK_customers" PRIMARY KEY (id));
28 |
29 | SELECT c.id, c.full_name
30 | FROM customers AS c
31 | WHERE c.full_name = 'John Doe';
32 | ```
33 |
34 | However, note that the EFCore.NamingConventions plugin isn't an official part of EF (or of the PostgreSQL provider), and sometimes lags behind in terms of support.
35 |
36 | See the [plugin documentation](https://github.com/efcore/EFCore.NamingConventions) for more details.
37 |
38 | ## Storage parameters
39 |
40 | PostgreSQL allows configuring tables with *storage parameters*, which can tweak storage behavior in various ways; [see the PostgreSQL documentation](https://www.postgresql.org/docs/current/sql-createtable.html#SQL-CREATETABLE-STORAGE-PARAMETERS) for more information.
41 |
42 | To configure a storage parameter on a table, use the following code:
43 |
44 | ```csharp
45 | protected override void OnModelCreating(ModelBuilder modelBuilder)
46 |     => modelBuilder.Entity<Blog>().HasStorageParameter("fillfactor", 70);
47 | ```
48 |
--------------------------------------------------------------------------------
/conceptual/Npgsql/wait.md:
--------------------------------------------------------------------------------
1 | # Waiting for Notifications
2 |
3 | Note: *This functionality replaces Npgsql 3.0's "Continuous processing mode"*.
4 |
5 | ## PostgreSQL Asynchronous messages
6 |
7 | PostgreSQL has a feature whereby arbitrary notification messages can be sent between clients. For example, one client may wait until it is
8 | notified by another client of a task that it is supposed to perform. Notifications are, by their nature, asynchronous - they can arrive
9 | at any point. For more detail about this feature, see the PostgreSQL [NOTIFY command](http://www.postgresql.org/docs/current/static/sql-notify.html).
10 | Some other asynchronous message types are notices (e.g. database shutdown imminent) and parameter changes, see the
11 | [PostgreSQL protocol docs](http://www.postgresql.org/docs/current/static/protocol-flow.html#PROTOCOL-ASYNC) for more details.
12 |
13 | Note that despite the word "asynchronous", this page has nothing to do with ADO.NET async operations (e.g. ExecuteReaderAsync).
14 |
15 | ## Processing of Notifications
16 |
17 | Npgsql exposes notification messages via the `Notification` event on NpgsqlConnection.
18 |
19 | Since asynchronous notifications are rarely used and processing can be complex, Npgsql only processes notification messages as
20 | part of regular (synchronous) query interaction. That is, if an asynchronous notification is sent, Npgsql will only process it and emit an
21 | event to the user the next time a command is sent and processed.
22 |
23 | To receive notifications outside a synchronous request-response cycle, call `NpgsqlConnection.Wait()`. This will make your thread block
24 | until a single notification is received (note that a version with a timeout as well as an async version exist). Note that the notification
25 | is still delivered via the `Notification` event as before.
26 |
27 | ```csharp
28 | var conn = new NpgsqlConnection(ConnectionString);
29 | conn.Open();
30 | conn.Notification += (o, e) => Console.WriteLine("Received notification");
31 |
32 | using (var cmd = new NpgsqlCommand("LISTEN channel_name", conn)) {
33 | cmd.ExecuteNonQuery();
34 | }
35 |
36 | while (true) {
37 | conn.Wait(); // Thread will block here
38 | }
39 | ```
40 |
41 | ---
42 |
43 | ## Keepalive
44 |
45 | You may want to turn on [keepalives](keepalive.md).
46 |
--------------------------------------------------------------------------------
/conceptual/Npgsql/keepalive.md:
--------------------------------------------------------------------------------
1 | # Keepalive
2 |
3 | Some clients keep idle connections for long periods of time - this is especially frequent when waiting for PostgreSQL notifications.
4 | In this scenario, how can the client know the connection is still up, and hasn't been broken by a server or network outage?
5 | For this purpose, Npgsql has a keepalive feature, which makes it initiate periodic ping roundtrips.
6 | This feature is by default disabled, and must be enabled via the
7 | [Keepalive](connection-string-parameters.md#timeouts-and-keepalive) connection string parameter, setting the number of seconds between each keepalive.
8 |
9 | When keepalive is enabled, Npgsql will emit an
10 | [`NpgsqlConnection.StateChange`](https://msdn.microsoft.com/en-us/library/system.data.common.dbconnection.statechange(v=vs.110).aspx)
11 | event if the keepalive fails.
12 |
13 | Note that you should only turn this feature on if you need it. Unless you know you'll have long-lived idle connections, and that your
14 | backend (or network equipment) will interfere with these connections, it's better to leave this off.
15 |
16 | ## TCP Keepalives
17 |
18 | The keepalive mechanism above is ideal for long-standing idle connections, but it cannot be used during query processing. With some PostgreSQL-like data warehouse products such as [Amazon Redshift](http://docs.aws.amazon.com/redshift/latest/mgmt/welcome.html) or [Greenplum](http://greenplum.org/), it is not uncommon for a single SQL statement to take a long time to execute, and during that time it is not possible to send application-level pings. For these cases you may want to turn on *TCP keepalive*, which is quite different from the application-level keepalive described above. To better understand the different kinds of keepalives, see [this blog post](http://blog.stephencleary.com/2009/05/detection-of-half-open-dropped.html). As that article explains, TCP keepalive depends on networking stack support and might not always work, but it is your only option during query processing.
19 |
20 | On Linux, you turn on keepalives simply by specifying `Tcp Keepalive=true` in your connection string. The default system-wide settings will be used (for interval, count...) - it is currently impossible to specify these at the application level. On Windows, you can also specify `Tcp Keepalive Time` and `Tcp Keepalive Interval` to tweak these settings.
21 |
--------------------------------------------------------------------------------
/conceptual/Npgsql/diagnostics/exceptions_notices.md:
--------------------------------------------------------------------------------
1 | # Exceptions, errors and notices
2 |
3 | ## Exception types
4 |
5 | Most exceptions thrown by Npgsql are either of type `NpgsqlException`, or wrapped by one; this allows your application to catch `NpgsqlException` where appropriate, for all database-related errors. Note that `NpgsqlException` is a sub-class of the general [System.Data.DbException](https://docs.microsoft.com/dotnet/api/system.data.common.dbexception), so if your application uses more than one database type, you can catch that as well.
6 |
7 | When Npgsql itself encounters an error, it typically raises that as an `NpgsqlException` directly, possibly wrapping an inner exception. For example, if a networking error occurs while communicating with PostgreSQL, Npgsql will raise an `NpgsqlException` wrapping an `IOException`; this allow you both to identify the root cause of the problem, while still identifying it as database-related.
8 |
9 | In other cases, PostgreSQL itself will report an error to Npgsql; Npgsql raises these by throwing a [PostgresException](xref:Npgsql.PostgresException), which is a sub-class of `NpgsqlException` adding important contextual information on the error. Most importantly, `PostgresException` exposes the [SqlState](xref:Npgsql.PostgresException.SqlState) property, which contains the [PostgreSQL error code](https://www.npgsql.org/doc/api/Npgsql.PostgresException.html#Npgsql_PostgresException_SqlState). This value can be consulted to identify which error type occurred.
10 |
11 | When executing multiple commands via `NpgsqlBatch`, the `NpgsqlException.BatchCommand` property references the command within the batch which triggered the exception. This allows you to understand exactly what happened, and access the specific SQL which triggered the error.
12 |
13 | ## PostgreSQL notices
14 |
15 | Finally, PostgreSQL also raises "notices", which contain non-critical information on command execution. Notices are not errors: they do not indicate failure and can be safely ignored, although they may contain valuable information on the execution of your commands.
16 |
17 | Npgsql logs notices in the *debug* logging level. To deal with notices programmatically, Npgsql also exposes the `Notice` event, which you can hook into for any further processing:
18 |
19 | ```csharp
20 | conn.Notice += (_, args) => Console.WriteLine(args.Notice.MessageText);
21 | ```
22 |
--------------------------------------------------------------------------------
/.github/workflows/build-documentation.yml:
--------------------------------------------------------------------------------
1 | name: Build Documentation
2 |
3 | on:
4 | push:
5 | branches: [main]
6 |
7 | pull_request:
8 |
9 | # Used to trigger the flow from Npgsql/EFCore.PG via HTTP POST
10 | repository_dispatch:
11 |
12 | # Allows you to run this workflow manually from the Actions tab
13 | workflow_dispatch:
14 |
15 | concurrency:
16 | group: "pages"
17 | cancel-in-progress: false
18 |
19 | jobs:
20 | build:
21 | runs-on: ubuntu-24.04
22 |
23 | steps:
24 | - name: Checkout repo
25 | uses: actions/checkout@v6
26 |
27 | - name: Use Node.js
28 | uses: actions/setup-node@v6.1.0
29 | with:
30 | node-version: 20.x
31 |
32 | - name: Run Markdownlint
33 | run: |
34 | echo "::add-matcher::.github/workflows/markdownlint-problem-matcher.json"
35 | npm i -g markdownlint-cli
36 | markdownlint "conceptual/**/*.md"
37 |
38 | # Setup software
39 | - name: Setup .NET Core
40 | uses: actions/setup-dotnet@v5.0.1
41 | with:
42 | dotnet-version: 10.0.x
43 |
44 | - name: Checkout Npgsql
45 | uses: actions/checkout@v6
46 | with:
47 | repository: npgsql/npgsql
48 | ref: docs
49 | path: Npgsql
50 |
51 | # docfx has issues specifically with analyzer/sourcegen projects; build manually before.
52 | - name: Build Npgsql
53 | run: dotnet build -c Release
54 | shell: bash
55 | working-directory: Npgsql
56 |
57 | - name: Checkout EFCore.PG
58 | uses: actions/checkout@v6
59 | with:
60 | repository: npgsql/Npgsql.EntityFrameworkCore.PostgreSQL
61 | ref: docs
62 | path: EFCore.PG
63 |
64 | - name: Build EFCore.PG
65 | run: dotnet build -c Release
66 | shell: bash
67 | working-directory: EFCore.PG
68 |
69 | - name: Get docfx
70 | run: dotnet tool install --version 2.78.4 -g docfx
71 |
72 | - name: Build docs
73 | run: docfx --warningsAsErrors
74 |
75 | - name: Upload artifact
76 | uses: actions/upload-pages-artifact@v4
77 | with:
78 | path: _site
79 |
80 | deploy:
81 | needs: build
82 | runs-on: ubuntu-latest
83 | if: github.event_name == 'push' && github.ref_name == 'main'
84 | permissions:
85 | contents: read
86 | pages: write
87 | id-token: write
88 | environment:
89 | name: github-pages
90 | url: ${{ steps.deployment.outputs.page_url }}
91 | steps:
92 | - name: Deploy to GitHub Pages
93 | id: deployment
94 | uses: actions/deploy-pages@v4
95 |
--------------------------------------------------------------------------------
/conceptual/Npgsql/toc.yml:
--------------------------------------------------------------------------------
1 | - name: Getting Started
2 | href: index.md
3 | - name: Release notes
4 | items:
5 | - name: "10.0"
6 | href: release-notes/10.0.md
7 | - name: "9.0"
8 | href: release-notes/9.0.md
9 | - name: "8.0"
10 | href: release-notes/8.0.md
11 | - name: Out of support
12 | items:
13 | - name: "7.0"
14 | href: release-notes/7.0.md
15 | - name: "6.0"
16 | href: release-notes/6.0.md
17 | - name: "5.0"
18 | href: release-notes/5.0.md
19 | - name: "4.1"
20 | href: release-notes/4.1.md
21 | - name: "4.0"
22 | href: release-notes/4.0.md
23 | - name: "3.2"
24 | href: release-notes/3.2.md
25 | - name: "3.1"
26 | href: release-notes/3.1.md
27 | - name: "3.0"
28 | href: release-notes/3.0.md
29 | - name: Installation
30 | href: installation.md
31 | - name: Basic usage
32 | href: basic-usage.md
33 | - name: Connection string parameters
34 | href: connection-string-parameters.md
35 | - name: Security and encryption
36 | href: security.md
37 | - name: Type mapping
38 | items:
39 | - name: Basic type mapping
40 | href: types/basic.md
41 | - name: Date and time
42 | href: types/datetime.md
43 | - name: Enums and composites
44 | href: types/enums_and_composites.md
45 | - name: Spatial
46 | items:
47 | - name: NetTopologySuite
48 | href: types/nts.md
49 | - name: GeoJSON
50 | href: types/geojson.md
51 | - name: JSON
52 | href: types/json.md
53 | - name: NodaTime
54 | href: types/nodatime.md
55 | - name: Diagnostics
56 | items:
57 | - name: Overview
58 | href: diagnostics/overview.md
59 | - name: Tracing
60 | href: diagnostics/tracing.md
61 | - name: Logging
62 | href: diagnostics/logging.md
63 | - name: Metrics
64 | href: diagnostics/metrics.md
65 | - name: Exceptions and notices
66 | href: diagnostics/exceptions_notices.md
67 | - name: Performance
68 | href: performance.md
69 | - name: Prepared statements
70 | href: prepare.md
71 | - name: Advanced topics
72 | items:
73 | - name: Failover and load balancing
74 | href: failover-and-load-balancing.md
75 | - name: Bulk copy
76 | href: copy.md
77 | - name: Waiting for notifications
78 | href: wait.md
79 | - name: Keepalive
80 | href: keepalive.md
81 | - name: Replication
82 | href: replication.md
83 | - name: Compatibility notes
84 | href: compatibility.md
85 | - name: FAQ
86 | href: faq.md
87 | - name: API reference
88 | href: "https://www.npgsql.org/doc/api/Npgsql.html"
89 | - name: Developer notes
90 | items:
91 | - name: Release checklist
92 | href: dev/release-checklist.md
93 | - name: Tests
94 | href: dev/tests.md
95 | - name: Type representations
96 | href: dev/type-representations.md
97 |
--------------------------------------------------------------------------------
/docfx.json:
--------------------------------------------------------------------------------
1 | {
2 | "metadata":
3 | [
4 | {
5 | "src":
6 | [
7 | {
8 | "files": [ "src/Npgsql/Npgsql.csproj" ],
9 | "exclude": [ "src/MSI/**", "src/VSIX/**", "**/bin/**", "**/obj/**" ],
10 | "src": "Npgsql/"
11 | }
12 | ],
13 | "dest": "obj/api/Npgsql",
14 | "properties":
15 | {
16 | "TargetFramework": "net10.0"
17 | }
18 | },
19 | {
20 | "src":
21 | [
22 | {
23 | "files": [ "src/**/*.csproj" ],
24 | "exclude": [ "**/bin/**", "**/obj/**", "Properties/NpgsqlStrings.*" ],
25 | "src": "EFCore.PG"
26 | }
27 | ],
28 | "dest": "obj/api/EFCore.PG",
29 | "properties":
30 | {
31 | "TargetFramework": "net10.0"
32 | }
33 | }
34 | ],
35 | "build":
36 | {
37 | "template":
38 | [
39 | "default",
40 | "modern"
41 | ],
42 | "content":
43 | [
44 | {
45 | "files": [ "**/*.yml" ],
46 | "src": "obj/api/Npgsql",
47 | "dest": "doc/api"
48 | },
49 | {
50 | "files": [ "**/*.yml" ],
51 | "src": "obj/api/EFCore.PG",
52 | "dest": "efcore/api"
53 | },
54 | {
55 | "files": [ "**.md", "toc.yml" ],
56 | "src": "conceptual/Npgsql",
57 | "dest": "doc"
58 | },
59 | {
60 | "files": [ "**.md", "toc.yml" ],
61 | "src": "conceptual/EFCore.PG",
62 | "dest": "efcore"
63 | },
64 | {
65 | "files": [ "**.md", "toc.yml" ],
66 | "src": "conceptual/EF6.PG",
67 | "dest": "ef6"
68 | },
69 | {
70 | "files": [ "*.md", "dev/**.md", "toc.yml" ]
71 | }
72 | ],
73 | "resource":
74 | [
75 | {
76 | "files": [ "img/**", "styles/**", "static/**", "CNAME" ]
77 | },
78 | {
79 | "files": [ "**" ],
80 | "src": "favicons"
81 | }
82 | ],
83 | "output": "_site",
84 | "xrefService": [ "https://xref.docs.microsoft.com/query?uid={uid}" ],
85 | "globalMetadata": {
86 | "_appTitle": "Npgsql Documentation",
87 | "_appFooter": "© Copyright 2025 The Npgsql Development Team",
88 | "_appLogoPath": "img/logo.svg",
89 | "_enableSearch": true,
90 | "_gitContribute": {
91 | "branch": "main"
92 | }
93 | },
94 | "fileMetadata": {
95 | "_gitContribute" : {
96 | "obj/api/Npgsql/**.yml": {
97 | "repo": "https://github.com/npgsql/Npgsql",
98 | "branch": "stable"
99 | },
100 | "obj/api/EFCore.PG/**.yml": {
101 | "repo": "https://github.com/npgsql/Npgsql.EntityFrameworkCore.PostgreSQL",
102 | "branch": "stable"
103 | }
104 | }
105 | }
106 | }
107 | }
108 |
--------------------------------------------------------------------------------
/conceptual/Npgsql/dev/release-checklist.md:
--------------------------------------------------------------------------------
1 | # Npgsql Release Checklist
2 |
3 | ## Release the actual version
4 |
5 | * Check out the git branch which represents the version you want to release. For a patch version, this will be e.g. `hotfix/9.0.2`; for a major version, this will be `main`.
6 | * Verify that the version is correct inside `Directory.Build.props`.
7 | * Do a git tag for the version (`git tag v9.0.2`) and push it to origin (`git push origin v9.0.2`).
8 | * Go to the repository's [Actions tab](https://github.com/npgsql/npgsql/actions) and wait for the build to complete.
9 | * If all goes well, you'll need to approve the deployment (this represents the push to nuget.org). If there's some sort of build failure, you can fix it, and then re-tag (`git tag -f v9.0.2`) and force-push (`git push -f upstream v9.0.2`).
10 |
11 | ## Post-release actions
12 |
13 | * If you released a patch version, you need to create the new hotfix branch and remove the old one (both locally and from `origin`): `git checkout -b hotfix/9.0.3`.
14 | * Edit `Directory.Build.props` to bump the new version, and commit (`git commit -a -m "Bump version to 9.0.3"`).
15 | * At this point, push the new hotfix branch and remove the old one:
16 |
17 | ```console
18 | git push -u upstream hotfix/9.0.3
19 |
20 | git branch -D hotfix/9.0.2
21 | git push upstream :hotfix/9.0.2
22 | ```
23 |
24 | * If you're releasing a major version, the steps are the same except that there's no old hotfix branch to remove, and the version needs to be updated in `main` (and pushed).
25 |
26 | ## Github milestone and release
27 |
28 | * Go to the [Milestones page](https://github.com/npgsql/npgsql/milestones) of the repo.
29 | * Create a milestone for the next version (e.g. 9.0.3).
30 | * If there are any open issues in the version milestone which you just published, and those weren't actually completed, move them to the new milestone for the next version (or to the backlog). The released milestone should contain only closed issues.
31 | * Edit the released milestone and set the date to today, just to have a record which version was published when.
32 | * Close the released milestone.
33 |
34 | * Go to the [Releases page](https://github.com/npgsql/npgsql/releases) of the repo.
35 | * Click "Draft a new release", select the git tag you just released, and give the release the title "v9.0.2".
36 | * Write a short text describing the changes; add a link to the closed issue list of the milestone ([example](https://github.com/npgsql/npgsql/milestone/125?closed=1)), and click "generate release notes". For a major version, link to the release notes page in our conceptual docs.
37 | * Publish the release.
38 |
39 | ## Update API documentation
40 |
41 | [Our documentation site](https://www.npgsql.org) automatically publishes API docs for Npgsql and EFCore.PG from the `docs` branches in each of those two repos. After publishing a major version, you'll need to re-point that branch to the new version, so that API docs for that version are generated (`git reset --hard v9.0.0`). You'll then need to trigger a docs rebuild (you can just wait for the next time some small note needs to be updated etc.).
42 |
--------------------------------------------------------------------------------
/conceptual/EFCore.PG/misc/other.md:
--------------------------------------------------------------------------------
1 | # Other
2 |
3 | ## PostgreSQL extensions
4 |
5 | The Npgsql EF Core provider allows you to specify PostgreSQL extensions that should be set up in your database.
6 | Simply use `HasPostgresExtension` in your context's `OnModelCreating` method:
7 |
8 | ```csharp
9 | protected override void OnModelCreating(ModelBuilder modelBuilder)
10 | => modelBuilder.HasPostgresExtension("hstore");
11 | ```
12 |
13 | ## Execution Strategy
14 |
15 | The Npgsql EF Core provider provides a retrying execution strategy, which will attempt to detect most transient PostgreSQL/network errors and will automatically retry your operation. To enable, place the following code in your context's `OnConfiguring`:
16 |
17 | ```csharp
18 | protected override void OnConfiguring(DbContextOptionsBuilder optionsBuilder)
19 | => optionsBuilder.UseNpgsql(
20 | "",
21 | options => options.EnableRetryOnFailure());
22 | ```
23 |
24 | This strategy relies on the `IsTransient` property of `NpgsqlException`.
25 |
26 | ## Certificate authentication
27 |
28 | Npgsql allows you to provide a callback for verifying the server-provided certificates, and to provide a callback for providing certificates to the server. The latter, if properly set up on the PostgreSQL side, allows you to do client certificate authentication - see [the Npgsql docs](http://www.npgsql.org/doc/security.html#encryption-ssltls) and also [the PostgreSQL docs](https://www.postgresql.org/docs/current/static/ssl-tcp.html#SSL-CLIENT-CERTIFICATES) on setting this up.
29 |
30 | The Npgsql EF Core provider allows you to set these two callbacks on the `DbContextOptionsBuilder` as follows:
31 |
32 | ```csharp
33 | protected override void OnConfiguring(DbContextOptionsBuilder optionsBuilder)
34 | => optionsBuilder.UseNpgsql(
35 | "",
36 | options =>
37 | {
38 | options.RemoteCertificateValidationCallback(MyCallback1);
39 | options.ProvideClientCertificatesCallback(MyCallback2);
40 | });
41 | ```
42 |
43 | You may also consider passing `Trust Server Certificate=true` in your connection string to make Npgsql accept whatever certificate your PostgreSQL provides (useful for self-signed certificates).
44 |
45 | > [!CAUTION]
46 | > When specifying the options via `OnConfiguring`, make sure that the callbacks you pass in are static methods. Passing in instance methods causes EF Core to create a new service provider for each context instance, which can degrade performance in a significant way.
47 |
48 | ## CockroachDB Interleave In Parent
49 |
50 | If you're using CockroachDB, the Npgsql EF Core provider exposes its ["interleave in parent" feature](https://www.cockroachlabs.com/docs/stable/interleave-in-parent.html). Use the following code:
51 |
52 | ```csharp
53 | protected override void OnModelCreating(ModelBuilder modelBuilder)
54 |     => modelBuilder.Entity<ChildEntityType>()
55 |         .UseCockroachDbInterleaveInParent(
56 |             typeof(ParentEntityType),
57 |             new List<string> { "prefix_column_1", "prefix_column_2" });
58 | ```
59 |
--------------------------------------------------------------------------------
/favicons/safari-pinned-tab.svg:
--------------------------------------------------------------------------------
1 |
2 |
4 |
7 |
8 | Created by potrace 1.14, written by Peter Selinger 2001-2017
9 |
10 |
12 |
47 |
48 |
49 |
--------------------------------------------------------------------------------
/conceptual/EFCore.PG/release-notes/2.0.md:
--------------------------------------------------------------------------------
1 | # 2.0 Release Notes
2 |
3 | Version 2.0.0 of the Npgsql Entity Framework Core provider has been released and is available on [nuget.org](https://www.nuget.org/packages/Npgsql.EntityFrameworkCore.PostgreSQL). This version works with [version 2.0.0 of Entity Framework Core](https://blogs.msdn.microsoft.com/dotnet/2017/08/14/announcing-entity-framework-core-2-0/), and contains some new Npgsql features as well.
4 |
5 | ## New Features
6 |
7 | Aside from general EF Core features new in 2.0.0, the Npgsql provider contains the following major new features:
8 |
9 | * PostgreSQL array operation translation ([#120](https://github.com/npgsql/Npgsql.EntityFrameworkCore.PostgreSQL/issues/120)). While array properties have been supported since 1.1, operations on those arrays were client-evaluated. Version 2.0 will now translate array indexing, `.Contains()`, `.SequenceEquals()` and `.Length`. See the [array mapping docs](../mapping/array.md) for more details.
10 | * A retrying execution strategy ([#155](https://github.com/npgsql/Npgsql.EntityFrameworkCore.PostgreSQL/issues/155)), which will automatically retry operations on exceptions which are considered transient.
11 | * PostgreSQL extensions are now included in scaffolded models ([#102](https://github.com/npgsql/Npgsql.EntityFrameworkCore.PostgreSQL/issues/102)).
12 | * More LINQ operations are translated to SQL, and more database scenarios are scaffolded correctly (see [the docs](http://www.npgsql.org/efcore/mapping-and-translation.html)).
13 |
14 | Here's the [full list of issues](https://github.com/npgsql/Npgsql.EntityFrameworkCore.PostgreSQL/milestone/6?closed=1). Please report any problems on our GitHub issue tracker.
15 |
16 | ## Upgrading from 1.x
17 |
18 | * Previously an Npgsql.EntityFrameworkCore.PostgreSQL.Design nuget package existed alongside the main package. Its contents have been merged into the main Npgsql.EntityFrameworkCore.PostgreSQL and no new version has been released.
19 | * Specifying versions when specifying PostgreSQL extensions on your model is no longer supported - this was a very rarely-used feature which interfered with extension scaffolding.
20 |
21 | ## Contributors
22 |
23 | Thank you very much to the following people who have contributed to the individual 2.0.x releases.
24 |
25 | ### [Milestone 2.0.2](https://github.com/npgsql/EFCore.PG/issues?q=is%3Aissue+milestone%3A2.0.2)
26 |
27 | Contributor | Assigned issues
28 | -------------------------------- | ---------------:|
29 | [@roji](https://github.com/roji) | [4](https://github.com/npgsql/EFCore.PG/issues?q=is%3Aissue+milestone%3A2.0.2+is%3Aclosed+assignee%3Aroji)
30 |
31 | ### [Milestone 2.0.1](https://github.com/npgsql/EFCore.PG/issues?q=is%3Aissue+milestone%3A2.0.1)
32 |
33 | Contributor | Assigned issues
34 | -------------------------------- | ---------------:|
35 | [@roji](https://github.com/roji) |[5](https://github.com/npgsql/EFCore.PG/issues?q=is%3Aissue+milestone%3A2.0.1+is%3Aclosed+assignee%3Aroji)
36 |
37 | ### [Milestone 2.0.0](https://github.com/npgsql/EFCore.PG/issues?q=is%3Aissue+milestone%3A2.0.0)
38 |
39 | Contributor | Assigned issues
40 | -------------------------------- | ---------------:|
41 | [@roji](https://github.com/roji) | [16](https://github.com/npgsql/EFCore.PG/issues?q=is%3Aissue+milestone%3A2.0.0+is%3Aclosed+assignee%3Aroji)
42 |
--------------------------------------------------------------------------------
/dev/build-server.md:
--------------------------------------------------------------------------------
1 | ---
2 | layout: page
3 | title: Build Server Notes
4 | ---
5 |
6 | This page describes the steps used to set up the Npgsql build server.
7 |
8 | If you're upgrading the TeamCity version, see "Give agent service start/stop permissions" below.
9 |
10 | ## Install all supported versions of the Postgresql backend
11 |
12 | At the time of writing, this means 9.1, 9.2, 9.3, 9.4, 9.5. They are configured on ports 5491, 5492, 5493, 5494, 5495.
13 |
14 | For SSPI/GSS tests, you need to set up a user with the same name as the user that will be running the tests (i.e. teamcity_agent).
15 | You must also add the following lines at the top of each PG's pg_hba.conf to set up SSPI/GSS for that user:
16 |
17 | ```
18 | host all teamcity_agent 127.0.0.1/32 sspi include_realm=0
19 | host all teamcity_agent ::1/128 sspi include_realm=0
20 | ```
21 |
22 | See [this page on SSPI](https://wiki.postgresql.org/wiki/Configuring_for_single_sign-on_using_SSPI_on_Windows).
23 |
24 | ## Install a TeamCity-dedicated Postgresql cluster
25 |
26 | TeamCity itself requires an SQL database, but we don't want it to run in the same environment as that used for the unit tests. So choosing the latest stable Postgresql version (9.6 at time of writing), we create a new Postgresql cluster: `initdb -U postgres -W c:\dev\TeamcityPostgresData`
27 |
28 | Next we set up a Windows service that starts up the new cluster: `pg_ctl register -N postgresql-9.6-teamcity -U teamcity -P -D c:\dev\TeamcityPostgresData`
29 |
30 | Finally, create a user and database and point TeamCity to it.
31 |
32 | ## Install .NET SDKs for all supported .NET versions
33 |
34 | * .NET 4.0 (Windows 7 SDK): http://www.microsoft.com/en-us/download/details.aspx?id=8279
35 | * .NET 4.5 (Windows 8 SDK): http://msdn.microsoft.com/en-us/windows/hardware/hh852363.aspx
36 | * .NET 4.5.1 (Windows 8.1 SDK): http://msdn.microsoft.com/en-us/windows/hardware/bg162891.aspx
37 |
38 | While installing the SDK for .NET 4.0, I had this problem: http://support.microsoft.com/kb/2717426
39 |
40 | ## Give agent service start/stop permissions
41 |
42 | When upgrading TeamCity, the agent needs to be able to stop and start the Windows service. This is how you can grant a normal user specific permissions on specific services:
43 |
44 | * Download and install subinacl from http://www.microsoft.com/en-us/download/details.aspx?id=23510
45 | * cd C:\Program Files (x86)\Windows Resource Kits\Tools\
46 | * subinacl /service TCBuildAgent /grant=teamcity_agent=TO
47 |
48 | ## Update build status back in github
49 |
50 | * Download the plugin from https://github.com/jonnyzzz/TeamCity.GitHub, get the ZIP
51 | * Drop the ZIP in the TeamCity content dir's plugins subdir
52 | * Add the Build Feature "Report change status to GitHub". Configure everything appropriately, and be sure the user you set up has push access to the repository!
53 |
54 | ## Install assorted dev utilities
55 |
56 | * GitVersion (with Chocolatey)
57 | * WiX toolset (v3.10.1 at time of writing)
58 |
59 | ## Install WiX
60 |
61 | WiX 3.10 has a dependency on .NET Framework 3.5, but there's some issue blocking its installation on Windows Server 2012 R2 (at least on Azure).
62 | A good workaround is to simply install via Powershell (`Add-WindowsFeature NET-Framework-Core`), see
63 | https://msdn.microsoft.com/en-us/library/dn169001(v=nav.70).aspx#InstallNET35.
64 |
65 | Note that ICE validation is disabled because apparently it requires an interactive account or admin privileges, which doesn't work in continuous integration.
66 |
67 |
--------------------------------------------------------------------------------
/conceptual/Npgsql/installation.md:
--------------------------------------------------------------------------------
1 | # Installation
2 |
3 | ## Official Packages
4 |
5 | Official releases of Npgsql are always available on [nuget.org](https://www.nuget.org/packages/Npgsql/). This is the recommended way to use Npgsql.
6 |
7 | We occasionally publish previews to nuget.org as well - these are generally quite safe for use, and can help us find issues before official packages are released.
8 |
9 | ## Daily Builds
10 |
11 | In addition to the official releases, we automatically publish CI packages for every build. You can use these to test new features or bug fixes that haven't been released yet. Two CI nuget feeds are available:
12 |
13 | * [The patch feed](https://www.myget.org/feed/Packages/npgsql) contains CI packages for the next hotfix/patch version. These packages are generally very stable and safe.
14 | To use it, add the feed as a package source to your NuGet.Config.
15 | * [The vNext feed](https://www.myget.org/feed/Packages/npgsql-vnext) contains CI packages for the next minor or major versions. These are less stable and should be tested with care.
16 | To use it, add the feed as a package source to your NuGet.Config.
17 |
18 | ## Older, unsupported installation methods
19 |
20 | ### Windows MSI Installer
21 |
22 | If you need to use Npgsql as a database provider for PowerBI, Excel or other similar systems, you need to install it into the Windows [Global Assembly Cache (GAC)](https://msdn.microsoft.com/en-us/library/yf1d93sz%28v=vs.110%29.aspx?f=255&MSPPError=-2147217396), and do some special configuration. Npgsql versions prior to 5.0.0 provided a Windows installer (MSI) which does the installation for you, and which are still usable and maintained with critical bug fixes. Do not use the Windows MSI installer unless you're sure that your program requires GAC installation - this method is otherwise highly discouraged.
23 |
24 | The Npgsql Windows MSI installer for Npgsql 4.1.x can be found on [our Github releases page](https://github.com/npgsql/npgsql/releases): it will install Npgsql (and optionally the Entity Framework providers) into your GAC and add Npgsql's DbProviderFactory into your `machine.config` file. Some additional assemblies which are Npgsql dependencies will be installed into the GAC as well (e.g. `System.Threading.Tasks.Extensions.dll`).
25 |
26 | ### Visual Studio Integration
27 |
28 | Older versions of Npgsql came with a Visual Studio extension (VSIX) which integrated PostgreSQL access into Visual Studio. The extension allowed connecting to PostgreSQL from within Visual Studio's Server Explorer, creating an Entity Framework 6 model from an existing database, etc. The extension had various limitations and known issues, mainly because of problems with Visual Studio's extensibility around databases.
29 |
30 | Use of the extension is no longer recommended. However, if you'd like to give it a try, it can be installed directly from [the Visual Studio Marketplace page](https://marketplace.visualstudio.com/vsgallery/258be600-452d-4387-9a2f-89ae10e84ae0).
31 |
32 | ### DbProviderFactory in .NET Framework
33 |
34 | On .NET Framework, you can register Npgsql's `DbProviderFactory` in your application's `App.Config` (or `Web.Config`), allowing you to use general, provider-independent ADO.NET types in your application (e.g. `DbConnection` instead of `NpgsqlConnection`) - [see this tutorial](https://msdn.microsoft.com/en-us/library/dd0w4a2z%28v=vs.110%29.aspx?f=255&MSPPError=-2147217396). To do this, add the following to your `App.config`:
35 |
36 | ```xml
37 |
38 |
39 |
40 |
41 |
42 | ```
43 |
--------------------------------------------------------------------------------
/conceptual/Npgsql/diagnostics/metrics.md:
--------------------------------------------------------------------------------
1 | # OpenTelemetry Metrics
2 |
3 | Npgsql supports reporting aggregated metrics which provide snapshots on its state and activities at a given point. These can be especially useful for diagnosing issues such as connection leaks, or doing general performance analysis. Metrics are reported via the standard .NET System.Diagnostics.Metrics API; [see these docs](https://learn.microsoft.com/dotnet/core/diagnostics/metrics) for more details. The Npgsql metrics implement the experimental [OpenTelemetry semantic conventions for database metrics](https://opentelemetry.io/docs/specs/semconv/database/database-metrics/) - adding some additional useful ones - and will evolve as that specification stabilizes.
4 |
5 | > [!NOTE]
6 | > Npgsql 10.0 changed the metrics names to align with the OpenTelemetry standard. The names shown below reflect the Npgsql 10 counters.
7 | >
8 | > Npgsql versions before 8.0, as well as TFMs under net6.0, emit metrics via the older Event Counters API instead of the new OpenTelemetry ones.
9 |
10 | Metrics are usually collected and processed via tools such as [Prometheus](https://prometheus.io), and plotted on dashboards via tools such as [Grafana](https://grafana.com). Configuring .NET to emit metrics to these tools is beyond the scope of this documentation, but you can use the command-line tool `dotnet-counters` to quickly test Npgsql's support. To collect metrics via `dotnet-counters`, [install the `dotnet-counters` tool](https://docs.microsoft.com/dotnet/core/diagnostics/dotnet-counters). Then, find out your process PID, and run it as follows:
11 |
12 | ```output
13 | dotnet counters monitor Npgsql -p <PID>
14 | ```
15 |
16 | `dotnet-counters` will now attach to your running process and start reporting continuous counter data:
17 |
18 | ```output
19 | [Npgsql]
20 | db.client.operation.npgsql.bytes_read (By / 1 sec)
21 | db.client.connection.pool.name=CustomersDB 1,020
22 | db.client.operation.npgsql.bytes_written (By / 1 sec)
23 | db.client.connection.pool.name=CustomersDB 710
24 | db.client.operation.duration (s)
25 | db.client.connection.pool.name=CustomersDB,Percentile=50 0.001
26 | db.client.connection.pool.name=CustomersDB,Percentile=95 0.001
27 | db.client.connection.pool.name=CustomersDB,Percentile=99 0.001
28 | db.client.operation.npgsql.executing ({command})
29 | db.client.connection.pool.name=CustomersDB 2
30 | db.client.operation.npgsql.prepared_ratio
31 | db.client.connection.pool.name=CustomersDB 0
32 | db.client.connection.max ({connection})
33 | db.client.connection.pool.name=CustomersDB 100
34 | db.client.connection.count ({connection})
35 | db.client.connection.pool.name=CustomersDB,state=idle 3
36 | db.client.connection.pool.name=CustomersDB,state=used 2
37 | ```
38 |
39 | Note that Npgsql emits multiple *dimensions* with the metrics, e.g. the connection states (idle or used). In addition, an identifier for the connection pool - or data source - is emitted with every metric, allowing you to separately track e.g. multiple databases accessed in the same applications. By default, the `pool.name` will be the connection string, but it can be useful to give your data sources a name for easier and more consistent tracking:
40 |
41 | ```csharp
42 | var builder = new NpgsqlDataSourceBuilder("Host=localhost;Username=test;Password=test")
43 | {
44 | Name = "CustomersDB"
45 | };
46 | await using var dataSource = builder.Build();
47 | ```
48 |
--------------------------------------------------------------------------------
/conceptual/Npgsql/compatibility.md:
--------------------------------------------------------------------------------
1 | # Compatibility Notes
2 |
3 | This page centralizes Npgsql's compatibility status with PostgreSQL and other components, and documents some important gotchas.
4 |
5 | ## PostgreSQL
6 |
7 | We aim to be compatible with all [currently supported PostgreSQL versions](http://www.postgresql.org/support/versioning/), which means 5 years back.
8 | Earlier versions may still work but we don't perform continuous testing on them or commit to resolving issues on them.
9 |
10 | ## ADO.NET
11 |
12 | Npgsql is an ADO.NET-compatible provider, so it has the same APIs as other .NET database drivers and should behave the same.
13 | Please let us know if you notice any non-standard behavior.
14 |
15 | ## NativeAOT and trimming
16 |
17 | NativeAOT allows using ahead-of-time compilation to publish a fully self-contained application that has been compiled to native code. Native AOT apps have faster startup time and smaller memory footprints, and thanks to trimming can also have a much smaller size footprint on disk.
18 |
19 | Starting with version 8.0, Npgsql is fully compatible with NativeAOT and trimming. The majority of features are compatible with NativeAOT/trimming and can be used without issues, and most applications using Npgsql can be used as-is with NativeAOT/trimming without any changes. A few features which are incompatible require an explicit code opt-in, which generates a warning if used with NativeAOT/trimming enabled.
20 |
21 | ## .NET Framework/.NET Core/mono
22 |
23 | Npgsql 4.* targets .NET Framework 4.6.1, as well as [.NET Standard 2.0](https://docs.microsoft.com/en-us/dotnet/standard/net-standard) which allows it to run on .NET Core. It is also tested and runs well on mono.
24 |
25 | Npgsql 5.* targets .NET Standard 2.0 and .NET 5. Starting with this version, we no longer run regression tests on .NET Framework and mono. In addition, the Visual Studio extension (VSIX) and the MSI GAC installer have been discontinued.
26 |
27 | ## pgbouncer
28 |
29 | Npgsql works well with PgBouncer, but there are some quirks to be aware of.
30 |
31 | * In many cases, you'll want to turn off Npgsql's internal connection pool by specifying `Pooling=false` on the connection string.
32 | * If you decide to keep Npgsql pooling on along with PgBouncer, and are using PgBouncer's transaction or statement mode, then you need to specify `No Reset On Close=true` on the connection string. This disables Npgsql's connection reset logic (`DISCARD ALL`), which gets executed when a connection is returned to Npgsql's pool, and which makes no sense in these modes.
33 | * Prior to version 3.1, Npgsql sends the `statement_timeout` startup parameter when it connects, but this parameter isn't supported by pgbouncer.
34 | You can get around this by specifying `CommandTimeout=0` on the connection string, and then manually setting the `CommandTimeout`
35 | property on your `NpgsqlCommand` objects. Version 3.1 no longer sends `statement_timeout`.
36 | * PgBouncer below 1.12 doesn't support SASL authentication.
37 |
38 | ## Amazon Redshift
39 |
40 | Amazon Redshift is a cloud-based data warehouse originally based on PostgreSQL 8.0.2.
41 | In addition, due to its nature some features have been removed and others changed in ways that make them incompatible with PostgreSQL.
42 | We try to support Redshift as much as we can, please let us know about issues you run across.
43 |
44 | First, check out Amazon's [page about Redshift and PostgreSQL](http://docs.aws.amazon.com/redshift/latest/dg/c_redshift-and-postgres-sql.html) which
45 | contains lots of useful compatibility information.
46 |
47 | Additional known issues:
48 |
49 | * If you want to connect over SSL, your connection string must contain `Server Compatibility Mode=Redshift`, otherwise you'll get a connection
50 | error about `ssl_renegotiation_limit`.
51 | * Entity Framework with database-computed identity values don't work with Redshift, since it doesn't support sequences
52 | (see issue [#544](https://github.com/npgsql/npgsql/issues/544)).
53 |
54 | ## DigitalOcean Managed Database
55 |
56 | DigitalOcean's Managed Database services requires you to connect to PostgreSQL over SSL. Unfortunately when you enable it in your connection string, you will get the same error regarding `ssl_renegotiation_limit` as Amazon Redshift. The Redshift compatibility mode setting resolves the issue on DigitalOcean.
57 |
--------------------------------------------------------------------------------
/conceptual/Npgsql/copy.md:
--------------------------------------------------------------------------------
1 | # COPY
2 |
3 | PostgreSQL has a feature allowing efficient bulk import or export of data to and from a table. This is usually a much faster way of getting data in and out of a table than using INSERT and SELECT. See documentation for the [COPY command](http://www.postgresql.org/docs/current/static/sql-copy.html) for more details.
4 |
5 | Npgsql supports three COPY operation modes: binary, text and raw binary.
6 |
7 | ## Binary COPY
8 |
9 | This mode uses the efficient PostgreSQL binary format to transfer data in and out of the database. The user uses an API to read and write rows and fields, which Npgsql decodes and encodes. When you've finished, you must call `Complete()` to save the data; not doing so will cause the COPY operation to be rolled back when the writer is disposed (this behavior is important in case an exception is thrown).
10 |
11 | > [!WARNING]
12 | > It is your responsibility to read and write the correct type! If you use COPY to write an int32 into a string field you may get an exception, or worse, silent data corruption. It is also highly recommended to use the overload of `Write()` which accepts an `NpgsqlDbType`, allowing you to unambiguously specify exactly what type you want to write. Test your code thoroughly.
13 |
14 | ```csharp
15 | // Import two columns to table data
16 | using (var writer = conn.BeginBinaryImport("COPY data (field_text, field_int2) FROM STDIN (FORMAT BINARY)"))
17 | {
18 | writer.StartRow();
19 | writer.Write("Hello");
20 | writer.Write(8, NpgsqlDbType.Smallint);
21 |
22 | writer.StartRow();
23 | writer.Write("Goodbye");
24 | writer.WriteNull();
25 |
26 | writer.Complete();
27 | }
28 |
29 | // Export two columns to table data
30 | using (var reader = conn.BeginBinaryExport("COPY data (field_text, field_int2) TO STDOUT (FORMAT BINARY)"))
31 | {
32 | reader.StartRow();
33 | Console.WriteLine(reader.Read());
34 | Console.WriteLine(reader.Read(NpgsqlDbType.Smallint));
35 |
36 | reader.StartRow();
37 | reader.Skip();
38 | Console.WriteLine(reader.IsNull); // Null check doesn't consume the column
39 | Console.WriteLine(reader.Read());
40 |
41 | reader.StartRow(); // Last StartRow() returns -1 to indicate end of data
42 | }
43 | ```
44 |
45 | ## Text COPY
46 |
47 | This mode uses the PostgreSQL text or csv format to transfer data in and out of the database. It is the user's responsibility to format the text or CSV appropriately, Npgsql simply provides a TextReader or Writer. This mode is less efficient than binary copy, and is suitable mainly if you already have the data in a CSV or compatible text format and don't care about performance.
48 |
49 | ```csharp
50 | using (var writer = conn.BeginTextImport("COPY data (field_text, field_int4) FROM STDIN")) {
51 | writer.Write("HELLO\t1\n");
52 | writer.Write("GOODBYE\t2\n");
53 | }
54 |
55 | using (var reader = conn.BeginTextExport("COPY data (field_text, field_int4) TO STDOUT")) {
56 | Console.WriteLine(reader.ReadLine());
57 | Console.WriteLine(reader.ReadLine());
58 | }
59 | ```
60 |
61 | ## Raw Binary COPY
62 |
63 | In this mode, data transfer is binary, but Npgsql does no encoding or decoding whatsoever - data is exposed as a raw .NET Stream. This mode makes sense only for bulk backup and restore of a table: the table is saved as a blob, which can later be restored. If you need to actually make sense of the data, you should be using regular binary mode instead (not raw).
64 |
65 | Example:
66 |
67 | ```csharp
68 | int len;
69 | var data = new byte[10000];
70 | // Export table1 to data array
71 | using (var inStream = conn.BeginRawBinaryCopy("COPY table1 TO STDOUT (FORMAT BINARY)")) {
72 | // We assume the data will fit in 10000 bytes, in real usage you would read repeatedly, write into a file.
73 | len = inStream.Read(data, 0, data.Length);
74 | }
75 |
76 | // Import data array into table2
77 | using (var outStream = conn.BeginRawBinaryCopy("COPY table2 FROM STDIN (FORMAT BINARY)")) {
78 | outStream.Write(data, 0, len);
79 | }
80 | ```
81 |
82 | ## Cancel
83 |
84 | Import operations can be cancelled at any time by disposing `NpgsqlBinaryImporter` without calling `Complete()` on it. Export operations can be cancelled as well, by calling `Cancel()`.
85 |
86 | ## Other
87 |
88 | See the CopyTests.cs test fixture for more usage samples.
89 |
--------------------------------------------------------------------------------
/img/logo.svg:
--------------------------------------------------------------------------------
1 |
2 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
--------------------------------------------------------------------------------
/conceptual/EFCore.PG/release-notes/1.1.md:
--------------------------------------------------------------------------------
1 | # Migrating to 1.1
2 |
3 | Version 1.1.0 of the Npgsql Entity Framework Core provider has been released and is available on nuget. This version works with [version 1.1.0 of Entity Framework Core](https://blogs.msdn.microsoft.com/dotnet/2016/10/25/announcing-entity-framework-core-1-1-preview-1/), and contains some new Npgsql features as well. Note that if you're using the command-line tools, you'll have to modify your tools section as described in the EF Core release post:
4 |
5 | ```json
6 | "tools": {
7 | "Microsoft.EntityFrameworkCore.Tools.DotNet": "1.0.0-preview3-final"
8 | },
9 | ```
10 |
11 | ## New Features
12 |
13 | Aside from general EF Core features, version 1.1.0 of the Npgsql provider contains the following:
14 |
15 | * Hilo key generation ([#5](https://github.com/npgsql/Npgsql.EntityFrameworkCore.PostgreSQL/issues/5)). This can be a much more efficient way to generate autoincrement key values.
16 | * PostgreSQL array mapping ([#15](https://github.com/npgsql/Npgsql.EntityFrameworkCore.PostgreSQL/issues/15)). This allows you to have plain CLR arrays on your entities, and have those arrays mapped to native [PostgreSQL array columns](https://www.postgresql.org/docs/current/static/arrays.html).
17 | * Optimistic concurrency with PostgreSQL's xmin column ([#19](https://github.com/npgsql/Npgsql.EntityFrameworkCore.PostgreSQL/issues/19)). Simply specify `.UseXminAsConcurrencyToken()` on an entity to start using this, see the [EF docs for more details](https://docs.efproject.net/en/latest/modeling/concurrency.html).
18 | * Cleanup of how serial (autoincrement) and generated GUID/UUID columns are managed.
19 |
20 | Here's the [full list of issues](https://github.com/npgsql/Npgsql.EntityFrameworkCore.PostgreSQL/milestone/3?closed=1). Please report any problems to [our issue tracker](https://github.com/npgsql/Npgsql.EntityFrameworkCore.PostgreSQL/issues).
21 |
22 | ## Upgrading from 1.0.x
23 |
24 | If you've used 1.0.x without migrations, you can simply upgrade and everything should just work. Unfortunately, if you already have migrations from 1.0.x you'll have to do some manual fixups because of some bad decisions that were previously made. If deleting your old migrations and starting over (e.g. non-production database) is an option, you may wish to do so. The following are instructions for fixing up 1.0.x migrations.
25 |
26 | First, Npgsql 1.0.x used a problematic method to identify serial (autoincrement) columns in migrations. If you look at your migration code you'll see `.Annotation("Npgsql:ValueGeneratedOnAdd", true)` on various columns. Unfortunately this annotation is also present on non-serial columns, e.g. columns with default values. This causes various issues and has been replaced in 1.1. However, you'll have to manually remove `.Annotation("Npgsql:ValueGeneratedOnAdd", true)`, and replace it with `.Annotation("Npgsql:ValueGenerationStrategy", NpgsqlValueGenerationStrategy.SerialColumn)` but *only* on columns which should be serial (e.g. not on columns with defaults). If you attempt to run a migration that has the old annotation, Npgsql will throw an exception and refuse to run your migrations.
27 |
28 | Unfortunately, this change will cause some incorrect changes the first time you add a migration after the upgrade. To avoid this, simply add a dummy migration right after upgrading to 1.1 and then delete the two new files generated for the dummy migration, *but keep the changes made to your ModelSnapshot.cs*. From this point on everything should be fine. *Make sure you have no pending changes to your model before doing this!*.
29 |
30 | Apologies for this problematic upgrade procedure, it should at least keep things clean going forward.
31 |
32 | ## Contributors
33 |
34 | Thank you very much to the following people who have contributed to the individual 1.1.x releases.
35 |
36 | ### [Milestone 1.1.1](https://github.com/npgsql/EFCore.PG/issues?q=is%3Aissue+milestone%3A1.1.1)
37 |
38 | Contributor | Assigned issues
39 | -------------------------------- | ---------------:|
40 | [@roji](https://github.com/roji) | [8](https://github.com/npgsql/EFCore.PG/issues?q=is%3Aissue+milestone%3A1.1.1+is%3Aclosed+assignee%3Aroji)
41 |
42 | ### [Milestone 1.1.0](https://github.com/npgsql/EFCore.PG/issues?q=is%3Aissue+milestone%3A1.1.0)
43 |
44 | Contributor | Assigned issues
45 | -------------------------------- | ---------------:|
46 | [@roji](https://github.com/roji) | [11](https://github.com/npgsql/EFCore.PG/issues?q=is%3Aissue+milestone%3A1.1.0+is%3Aclosed+assignee%3Aroji)
47 |
--------------------------------------------------------------------------------
/conceptual/Npgsql/types/geojson.md:
--------------------------------------------------------------------------------
1 | # PostGIS/GeoJSON Type Plugin
2 |
3 | The [Npgsql.GeoJSON](https://nuget.org/packages/Npgsql.GeoJSON) plugin makes Npgsql read and write PostGIS spatial types as [GeoJSON (RFC7946) types](http://geojson.org/), via the [GeoJSON.NET](https://github.com/GeoJSON-Net/GeoJSON.Net) library.
4 |
5 | As an alternative, you can use [Npgsql.NetTopologySuite](nts.md), which is a full-fledged .NET spatial library with many features.
6 |
7 | ## Setup
8 |
9 | To avoid forcing a dependency on the GeoJSON library for users not using spatial, GeoJSON support is delivered as a separate plugin. To use the plugin, simply add a dependency on [Npgsql.GeoJSON](https://www.nuget.org/packages/Npgsql.GeoJSON) and set it up in one of the following ways:
10 |
11 | ### [NpgsqlDataSource](#tab/datasource)
12 |
13 | > [!NOTE]
14 | > `NpgsqlDataSource` was introduced in Npgsql 7.0, and is the recommended way to manage type mapping. If you're using an older version, see the other methods.
15 |
16 | ```csharp
17 | var dataSourceBuilder = new NpgsqlDataSourceBuilder(...);
18 | dataSourceBuilder.UseGeoJson();
19 | await using var dataSource = dataSourceBuilder.Build();
20 | ```
21 |
22 | ### [Global mapping](#tab/global)
23 |
24 | If you're using an older version of Npgsql which doesn't yet support `NpgsqlDataSource`, you can configure mappings globally for all connections in your application:
25 |
26 | ```csharp
27 | NpgsqlConnection.GlobalTypeMapper.UseGeoJson();
28 | ```
29 |
30 | For this to work, you must place this code at the beginning of your application, before any other Npgsql API is called. Note that in Npgsql 7.0, global type mappings are obsolete (but still supported) - `NpgsqlDataSource` is the recommended way to manage type mappings.
31 |
32 | ### [Connection mapping](#tab/connection)
33 |
34 | > [!NOTE]
35 | > This mapping method has been removed in Npgsql 7.0.
36 |
37 | Older versions of Npgsql supported configuring a type mapping on an individual connection, as follows:
38 |
39 | ```csharp
40 | var conn = new NpgsqlConnection(...);
41 | conn.TypeMapper.UseGeoJson();
42 | ```
43 |
44 | ***
45 |
46 | ## Reading and Writing Geometry Values
47 |
48 | When reading PostGIS values from the database, Npgsql will automatically return the appropriate GeoJSON types: `Point`, `LineString`, and so on. Npgsql will also automatically recognize GeoJSON's types in parameters, and will automatically send the corresponding PostGIS type to the database. The following code demonstrates a roundtrip of a GeoJSON `Point` to the database:
49 |
50 | ```csharp
51 | conn.ExecuteNonQuery("CREATE TEMP TABLE data (geom GEOMETRY)");
52 |
53 | await using (var cmd = new NpgsqlCommand("INSERT INTO data (geom) VALUES ($1)", conn))
54 | {
55 | cmd.Parameters.Add(new() { Value = new Point(new Position(51.899523, -2.124156)) });
56 | await cmd.ExecuteNonQueryAsync();
57 | }
58 |
59 | await using (var cmd = new NpgsqlCommand("SELECT geom FROM data", conn))
60 | await using (var reader = await cmd.ExecuteReaderAsync())
61 | {
62 | await reader.ReadAsync();
63 | var point2 = reader.GetFieldValue<Point>(0);
64 | }
65 | ```
66 |
67 | You may also explicitly specify a parameter's type by setting `NpgsqlDbType.Geometry`.
68 |
69 | ## Geography (geodetic) Support
70 |
71 | PostGIS has two types: `geometry` (for Cartesian coordinates) and `geography` (for geodetic or spherical coordinates). You can read about the geometry/geography distinction [in the PostGIS docs](https://postgis.net/docs/manual-2.4/using_postgis_dbmanagement.html#PostGIS_Geography) or in [this blog post](http://workshops.boundlessgeo.com/postgis-intro/geography.html). In a nutshell, `geography` is much more accurate when doing calculations over long distances, but is more expensive computationally and supports only a small subset of the spatial operations supported by `geometry`.
72 |
73 | Npgsql uses the same GeoJSON types to represent both `geometry` and `geography` - the `Point` type represents a point in either Cartesian or geodetic space. You usually don't need to worry about this distinction because PostgreSQL will usually cast types back and forth as needed. However, it's worth noting that Npgsql sends Cartesian `geometry` by default, because that's the usual requirement. You have the option of telling Npgsql to send `geography` instead by specifying `NpgsqlDbType.Geography`:
74 |
75 | ```csharp
76 | using (var cmd = new NpgsqlCommand("INSERT INTO data (geog) VALUES ($1)", conn))
77 | {
78 | cmd.Parameters.Add(new() { Value = point, NpgsqlDbType = NpgsqlDbType.Geography });
79 | await cmd.ExecuteNonQueryAsync();
80 | }
81 | ```
82 |
83 | If you prefer to use `geography` everywhere by default, you can also specify that when setting up the plugin:
84 |
85 | ```csharp
86 | dataSourceBuilder.UseGeoJson(geographyAsDefault: true);
87 | ```
88 |
--------------------------------------------------------------------------------
/img/jetbrains-logo.svg:
--------------------------------------------------------------------------------
1 |
2 |
3 |
6 |
7 |
8 |
9 |
10 |
11 |
14 |
15 |
16 |
17 |
18 |
19 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
31 |
32 |
33 |
34 |
35 |
36 |
39 |
40 |
41 |
42 |
43 |
45 |
47 |
48 |
51 |
54 |
56 |
57 |
59 |
63 |
64 |
65 |
66 |
67 |
--------------------------------------------------------------------------------
/conceptual/EFCore.PG/modeling/indexes.md:
--------------------------------------------------------------------------------
1 | # Indexes
2 |
3 | PostgreSQL and the Npgsql provider support the standard index modeling described in [the EF Core docs](https://docs.microsoft.com/ef/core/modeling/indexes). This page describes some supported PostgreSQL-specific features.
4 |
5 | ## Covering indexes (INCLUDE)
6 |
7 | PostgreSQL supports [covering indexes](https://paquier.xyz/postgresql-2/postgres-11-covering-indexes), which allow you to include "non-key" columns in your indexes. This allows you to perform index-only scans and can provide a significant performance boost:
8 |
9 | ```csharp
10 | protected override void OnModelCreating(ModelBuilder modelBuilder)
11 |     => modelBuilder.Entity<Blog>()
12 | .HasIndex(b => b.Id)
13 | .IncludeProperties(b => b.Name);
14 | ```
15 |
16 | This will create an index for searching on `Id`, but containing also the column `Name`, so that reading the latter will not involve accessing the table. The SQL generated is as follows:
17 |
18 | ```sql
19 | CREATE INDEX "IX_Blog_Id" ON blogs ("Id") INCLUDE ("Name");
20 | ```
21 |
22 | ## Treating nulls as non-distinct
23 |
24 | By default, when you create a unique index, PostgreSQL treats null values as distinct; this means that a unique index can contain multiple null values in a column. When creating an index, you can also instruct PostgreSQL that nulls should be treated as *non-distinct*; this causes a unique constraint violation to be raised if a column contains multiple null values:
25 |
26 | ```csharp
27 | protected override void OnModelCreating(ModelBuilder modelBuilder)
28 |     => modelBuilder.Entity<Blog>().HasIndex(b => b.Url)
29 | .IsUnique()
30 | .AreNullsDistinct(false);
31 | ```
32 |
33 | ## Index methods
34 |
35 | PostgreSQL supports a number of *index methods*, or *types*. These are specified at index creation time via the `USING <method>` clause, see the [PostgreSQL docs for `CREATE INDEX`](https://www.postgresql.org/docs/current/static/sql-createindex.html) and [this page](https://www.postgresql.org/docs/current/static/indexes-types.html) for information on the different types.
36 |
37 | The Npgsql EF Core provider allows you to specify the index method to be used by calling `HasMethod()` on your index in your context's `OnModelCreating` method:
38 |
39 | ```csharp
40 | protected override void OnModelCreating(ModelBuilder modelBuilder)
41 |     => modelBuilder.Entity<Blog>()
42 | .HasIndex(b => b.Url)
43 | .HasMethod("gin");
44 | ```
45 |
46 | ## Index operator classes
47 |
48 | PostgreSQL allows you to specify [operator classes on your indexes](https://www.postgresql.org/docs/current/indexes-opclass.html), to allow tweaking how the index should work. Use the following code to specify an operator class:
49 |
50 | ```csharp
51 | protected override void OnModelCreating(ModelBuilder modelBuilder)
52 |     => modelBuilder.Entity<Blog>()
53 | .HasIndex(b => new { b.Id, b.Name })
54 | .HasOperators(null, "text_pattern_ops");
55 | ```
56 |
57 | Note that each operator class is used for the corresponding index column, by order. In the example above, the `text_pattern_ops` class will be used for the `Name` column, while the `Id` column will use the default class (unspecified), producing the following SQL:
58 |
59 | ```sql
60 | CREATE INDEX "IX_blogs_Id_Name" ON blogs ("Id", "Name" text_pattern_ops);
61 | ```
62 |
63 | ## Storage parameters
64 |
65 | PostgreSQL allows configuring indexes with *storage parameters*, which can tweak their behaviors in various ways; which storage parameters are available depends on the chosen index method. [See the PostgreSQL documentation](https://www.postgresql.org/docs/current/sql-createindex.html#SQL-CREATEINDEX-STORAGE-PARAMETERS) for more information.
66 |
67 | To configure a storage parameter on an index, use the following code:
68 |
69 | ```csharp
70 | protected override void OnModelCreating(ModelBuilder modelBuilder)
71 |     => modelBuilder.Entity<Blog>()
72 | .HasIndex(b => b.Url)
73 | .HasStorageParameter("fillfactor", 70);
74 | ```
75 |
76 | ## Creating indexes concurrently
77 |
78 | Creating an index can interfere with regular operation of a database. Normally PostgreSQL locks the table to be indexed against writes and performs the entire index build with a single scan of the table. Other transactions can still read the table, but if they try to insert, update, or delete rows in the table they will block until the index build is finished. This could have a severe effect if the system is a live production database. Very large tables can take many hours to be indexed, and even for smaller tables, an index build can lock out writers for periods that are unacceptably long for a production system.
79 |
80 | The EF provider allows you to specify that an index should be created *concurrently*, partially mitigating the above issues:
81 |
82 | ```csharp
83 | protected override void OnModelCreating(ModelBuilder modelBuilder)
84 |     => modelBuilder.Entity<Blog>()
85 | .HasIndex(b => b.Url)
86 | .IsCreatedConcurrently();
87 | ```
88 |
89 | > [!CAUTION]
90 | > Do not enable this feature before reading the [PostgreSQL documentation](https://www.postgresql.org/docs/current/sql-createindex.html#SQL-CREATEINDEX-CONCURRENTLY) and understanding the full implications of concurrent index creation.
91 |
--------------------------------------------------------------------------------
/conceptual/Npgsql/faq.md:
--------------------------------------------------------------------------------
1 | # FAQ
2 |
3 | ## How can I call a PostgreSQL 11 stored procedure? I tried doing so with CommandType.StoredProcedure and got an error...
4 |
5 | PostgreSQL 11 stored procedures can be called, but unfortunately not with `CommandType.StoredProcedure`. PostgreSQL has supported stored *functions* for a long while, and since these have acted as replacements for non-existing procedures, Npgsql's `CommandType.StoredProcedure` has been implemented to invoke them; this means that `CommandType.StoredProcedure` translates into `SELECT * FROM my_stored_function()`. The new stored procedures introduce a special invocation syntax - `CALL my_stored_procedure()` - which is incompatible with the existing stored function syntax.
6 |
7 | On the brighter side, it's very easy to invoke stored procedures (or functions) yourself - you don't really need `CommandType.StoredProcedure`. Simply create a regular command and set `CommandText` to `CALL my_stored_procedure(@p1, @p2)`, handling parameters like you would any other statement. In fact, with Npgsql and PostgreSQL, `CommandType.StoredProcedure` doesn't really have any added value over constructing the command yourself.
8 |
9 | ## I opened a pooled connection, and it throws right away when I use it! What gives?
10 |
11 | We know it's frustrating and seems weird, but this behavior is by-design.
12 |
13 | While your connection is idle in the pool, any number of things could happen to it - a timeout could cause it to break, or some other similar network problem. Unfortunately, with the way networking works, there is no reliable way for us to know on the client if a connection is still alive; the only thing we can do is send something to PostgreSQL, and wait for the response to arrive. Doing this whenever a connection is handed out from the pool would kill the very reason pooling exists - it would dramatically slow down pooling, which is there precisely to avoid unneeded network roundtrips.
14 |
15 | But the reality is even more grim than that. Even if Npgsql checked whether a connection is live before handing it out of the pool, there's nothing guaranteeing that the connection won't break 1 millisecond after that check - it's a total race condition. So the check wouldn't just degrade performance, it would also be largely useless. The reality of network programming is that I/O errors can occur at any point, and your code must take that into account if it has high reliability requirements. Resilience/retrying systems can help you with this; take a look at [Polly](https://github.com/App-vNext/Polly) as an example.
16 |
17 | One thing which Npgsql can do to help a bit, is the [keepalive feature](https://www.npgsql.org/doc/keepalive.html); this does a roundtrip with PostgreSQL every e.g. 1 second - including when the connection is idle in the pool - and destroys it if an I/O error occurs. However, depending on timing, you may still get a broken connection out of the pool - unfortunately there's simply no way around that.
18 |
19 | ## I get an exception "The field field1 has a type currently unknown to Npgsql (OID XXXXX). You can retrieve it as a string by marking it as unknown".
20 |
21 | Npgsql has to implement support for each PostgreSQL type, and it seems you've stumbled upon an unsupported type.
22 |
23 | First, head over to our [issues page](https://github.com/npgsql/npgsql/issues) and check if an issue already exists on your type,
24 | otherwise please open one to let us know.
25 |
26 | Then, as a workaround, you can have your type treated as text - it will be up to you to parse it in your program.
27 | One simple way to do this is to append ::TEXT in your query (e.g. `SELECT 3::TEXT`).
28 |
29 | If you don't want to modify your query, Npgsql also includes an API for requesting types as text.
30 | The following code returns all the columns in the resultset as text:
31 |
32 | ```csharp
33 | await using (var cmd = new NpgsqlCommand(...)) {
34 | cmd.AllResultTypesAreUnknown = true;
35 | await using var reader = await cmd.ExecuteReaderAsync();
36 | // Read everything as strings
37 | }
38 | ```
39 |
40 | You can also specify text only for some columns in your resultset:
41 |
42 | ```csharp
43 | await using (var cmd = new NpgsqlCommand(...)) {
44 | // Only the second field will be fetched as text
45 | cmd.UnknownResultTypeList = new[] { false, true };
46 | await using var reader = await cmd.ExecuteReaderAsync();
47 | // Read everything as strings
48 | }
49 | ```
50 |
51 | ## I'm trying to write a JSONB type and am getting 'column "XXX" is of type jsonb but expression is of type text'
52 |
53 | When sending a JSONB parameter, you must explicitly specify its type to be JSONB with NpgsqlDbType:
54 |
55 | ```csharp
56 | await using (var cmd = new NpgsqlCommand("INSERT INTO foo (col) VALUES (@p)", conn)) {
57 | cmd.Parameters.AddWithValue("p", NpgsqlDbType.Jsonb, jsonText);
58 | }
59 | ```
60 |
61 | ## I'm trying to apply an Entity Framework 6 migration and I get `Type is not resolved for member 'Npgsql.NpgsqlException,Npgsql'`
62 |
63 | Unfortunately, a shortcoming of EF6 requires you to have Npgsql.dll in the Global Assembly Cache (GAC), otherwise you can't see
64 | migration-triggered exceptions. You can add Npgsql.dll to the GAC by opening a VS Developer Command Prompt as administrator and
65 | running the command `gacutil /i Npgsql.dll`. You can remove it from the GAC with `gacutil /u Npgsql`.
66 |
--------------------------------------------------------------------------------
/conceptual/Npgsql/diagnostics/tracing.md:
--------------------------------------------------------------------------------
1 | # Tracing with OpenTelemetry (experimental)
2 |
3 | > [!NOTE]
4 | >
5 | > [The OpenTelemetry specifications for database tracing](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/database.md) are currently experimental, so Npgsql's support may change in upcoming releases.
6 |
7 | [OpenTelemetry](https://opentelemetry.io/) is a widely-adopted framework for distributed observability across many languages and components; its tracing standards allow applications and libraries to emit information on activities and events, which can be exported by the application, stored and analyzed. Activities typically have start and end times, and can encompass other activities recursively; this allows you to analyze e.g. exactly how much time was spent in the database when handling a certain HTTP call.
8 |
9 | ## Basic usage
10 |
11 | To make Npgsql emit tracing data, reference the [Npgsql.OpenTelemetry](https://www.nuget.org/packages/Npgsql.OpenTelemetry) NuGet package from your application, and set up tracing as follows:
12 |
13 | ```csharp
14 | using var tracerProvider = Sdk.CreateTracerProviderBuilder()
15 | .SetResourceBuilder(ResourceBuilder.CreateDefault().AddService("npgsql-tester"))
16 | .SetSampler(new AlwaysOnSampler())
17 | // This optionally activates tracing for your application, if you trace your own activities:
18 | .AddSource("MyApp")
19 | // This activates Npgsql's tracing:
20 | .AddNpgsql()
21 | // This prints tracing data to the console:
22 | .AddConsoleExporter()
23 | .Build();
24 | ```
25 |
26 | Once this is done, you should start seeing Npgsql trace data appearing in your application's console. At this point, you can look into exporting your trace data to a more useful destination: systems such as [Zipkin](https://zipkin.io/) or [Jaeger](https://www.jaegertracing.io/) can efficiently collect and store your data, and provide user interfaces for querying and exploring it. Setting these up in your application is quite easy - simply replace the console exporter with the appropriate exporter for the chosen system.
27 |
28 | For example, Zipkin visualizes traces in the following way:
29 |
30 | 
31 |
32 | In this trace, the Npgsql query (to database testdb) took around 800ms, and was nested inside the application's `work1` activity, which also had another unrelated `subtask1`. This allows understanding the relationships between the different activities, and where time is being spent.
33 |
34 | ## Configuration options
35 |
36 | > [!NOTE]
37 | >
38 | > This feature was introduced in Npgsql 9.0
39 |
40 | Once you've enabled Npgsql tracing as above, you can tweak its configuration via the API:
41 |
42 | ```csharp
43 | dataSourceBuilder.ConfigureTracing(o => o
44 | // Set the command SQL as the span name
45 | .ConfigureCommandSpanNameProvider(cmd => cmd.CommandText)
46 | // Filter out COMMIT commands
47 | .ConfigureCommandFilter(cmd => !cmd.CommandText.StartsWith("COMMIT", StringComparison.OrdinalIgnoreCase)));
48 | ```
49 |
50 | This allows you to:
51 |
52 | * Specify a filter which determines which commands get traced
53 | * Set the tracing span name (e.g. use the command's SQL as the span name)
54 | * Add arbitrary tags to the tracing span, based on the command
55 | * Disable the time-to-first-read event that's emitted in spans
56 | * Disable tracing physical connection open span
57 |
58 | ## Using `AsyncLocal` to pass arbitrary information to your callbacks
59 |
60 | The callbacks available via `ConfigureTracing` only accept the `NpgsqlCommand` or `NpgsqlBatch` as their parameters; this makes it difficult to e.g. assign arbitrary names to your commands, so that they show up as the span names in your tracing monitor. You can use .NET [`AsyncLocal`](https://learn.microsoft.com/dotnet/api/system.threading.asynclocal-1) to flow arbitrary information from the command call site (where you execute the command) to your tracing callbacks to achieve this.
61 |
62 | For example, the following adds an `ExecuteReaderWithSpanNameAsync` extension method to `NpgsqlCommand`:
63 |
64 | ```c#
65 | internal static class DbCommandExtensions
66 | {
67 | internal static readonly AsyncLocal<string?> CommandName = new();
68 |
69 | public static async Task<NpgsqlDataReader> ExecuteReaderWithSpanNameAsync(this NpgsqlCommand command, string spanName)
70 | {
71 | var previousValue = CommandName.Value;
72 | CommandName.Value = spanName;
73 |
74 | try
75 | {
76 | return await command.ExecuteReaderAsync();
77 | }
78 | finally
79 | {
80 | CommandName.Value = previousValue;
81 | }
82 | }
83 | }
84 | ```
85 |
86 | You can now configure your data source to use this span name in commands:
87 |
88 | ```c#
89 | dataSourceBuilder.ConfigureTracing(o =>
90 | o.ConfigureCommandSpanNameProvider(_ =>
91 | DbCommandExtensions.CommandName.Value));
92 | ```
93 |
94 | At this point, you can execute commands as follows, and see the provided value appearing in your tracing:
95 |
96 | ```c#
97 | await using var reader = await command.ExecuteReaderWithSpanNameAsync("FetchAllUsers");
98 | ```
99 |
100 | We'll likely work on future improvements to streamline this and make the above unnecessary.
101 |
--------------------------------------------------------------------------------
/conceptual/EFCore.PG/release-notes/2.2.md:
--------------------------------------------------------------------------------
1 | # 2.2 Release Notes
2 |
3 | Version 2.2.0 of the Npgsql Entity Framework Core provider has been released and is available on nuget. This version works with [version 2.2.0 of Entity Framework Core](https://docs.microsoft.com/ef/core/what-is-new/ef-core-2.2), and contains some new Npgsql features as well.
4 |
5 | This release was the result of hard work by [@roji](https://github.com/roji/), [@austindrenski](https://github.com/austindrenski), [@yohdeadfall](https://github.com/yohdeadfall) and [@khellang](https://github.com/khellang).
6 |
7 | ## New Features
8 |
9 | Aside from general EF Core features new in 2.2.0, the Npgsql EF Core provider contains the following major new features:
10 |
11 | ### PostgreSQL 11 covering indexes
12 |
13 | PostgreSQL 11 introduced [covering indexes feature](https://paquier.xyz/postgresql-2/postgres-11-covering-indexes), which allow you to include "non-key" columns in your indexes. This allows you to perform index-only scans and can provide a significant performance boost. Support has been added in ([#697](https://github.com/npgsql/Npgsql.EntityFrameworkCore.PostgreSQL/issues/697)):
14 |
15 | ```csharp
16 | protected override void OnModelCreating(ModelBuilder modelBuilder)
17 | => modelBuilder.Entity<Blog>()
18 | .ForNpgsqlHasIndex(b => b.Id)
19 | .ForNpgsqlInclude(b => b.Name);
20 | ```
21 |
22 | This will create an index for searching on `Id`, but containing also the column `Name`, so that reading the latter will not involve accessing the table. [See the documentation for more details](../modeling/indexes.md).
23 |
24 | Thanks to [@khellang](https://github.com/khellang) for contributing this!
25 |
26 | ### PostgreSQL user-defined ranges
27 |
28 | The provider already supported [PostgreSQL range types](https://www.postgresql.org/docs/current/rangetypes.html), but prior to 2.2 that support was limited to the built-in range types which come with PostgreSQL. [#329](https://github.com/npgsql/Npgsql.EntityFrameworkCore.PostgreSQL/issues/329) extends that support to range types which you define:
29 |
30 | ```csharp
31 | protected override void OnConfiguring(DbContextOptionsBuilder builder)
32 | => builder.UseNpgsql("...", b => b.MapRange<float>("floatrange"));
33 |
34 | protected override void OnModelCreating(ModelBuilder builder)
35 | => builder.ForNpgsqlHasRange("floatrange", "real");
36 | ```
37 |
38 | This will make the provider create a PostgreSQL range called `floatrange`, over the PostgreSQL type `real`. Any property with type `NpgsqlRange<float>` will be seamlessly mapped to it.
39 |
40 | [See the documentation for more details](../mapping/range.md).
41 |
42 | ### Seeding for Npgsql-specific types
43 |
44 | When using some Npgsql-specific types, it wasn't possible to seed values for those types. With EF Core support for seeding any type, [#667](https://github.com/npgsql/Npgsql.EntityFrameworkCore.PostgreSQL/issues/667) allows seeding values for network, bit and range types (more are coming).
45 |
46 | ### PostgreSQL index operator classes
47 |
48 | PostgreSQL allows you to specify [operator classes on your indexes](https://www.postgresql.org/docs/current/indexes-opclass.html), to allow tweaking how the index should work. [#481](https://github.com/npgsql/Npgsql.EntityFrameworkCore.PostgreSQL/issues/481) adds support for managing these. [See the documentation for more details](../modeling/indexes.md).
49 |
50 | Thanks to [@khellang](https://github.com/khellang) for contributing this!
51 |
52 | ### Other features
53 |
54 | * Various issues with enum and range types were fixed, including upper/lower case, quoting and schema management.
55 | * Many new SQL translations were added, so more of your LINQ expressions can run in the database. We'll be working on our documentation to make these more discoverable.
56 |
57 | The full list of issues for this release is [available here](https://github.com/npgsql/Npgsql.EntityFrameworkCore.PostgreSQL/issues?q=milestone%3A2.2.0+is%3Aclosed&utf8=%E2%9C%93).
58 |
59 | ## Contributors
60 |
61 | Thank you very much to the following people who have contributed to the individual 2.2.x releases.
62 |
63 | ### [Milestone 2.2.6](https://github.com/npgsql/EFCore.PG/issues?q=is%3Aissue+milestone%3A2.2.6)
64 |
65 | Contributor | Assigned issues
66 | ------------------------------------------------ | ----------------:|
67 | [@roji](https://github.com/roji) |[5](https://github.com/npgsql/EFCore.PG/issues?q=is%3Aissue+milestone%3A2.2.6+is%3Aclosed+assignee%3Aroji)
68 |
69 | ### [Milestone 2.2.4](https://github.com/npgsql/EFCore.PG/issues?q=is%3Aissue+milestone%3A2.2.4)
70 |
71 | Contributor | Assigned issues
72 | -------------------------------------------------- | ----------------:|
73 | [@roji](https://github.com/roji) | [3](https://github.com/npgsql/EFCore.PG/issues?q=is%3Aissue+milestone%3A2.2.4+is%3Aclosed+assignee%3Aroji)
74 | [@austindrenski](https://github.com/austindrenski) | [1](https://github.com/npgsql/EFCore.PG/issues?q=is%3Aissue+milestone%3A2.2.4+is%3Aclosed+assignee%3Aaustindrenski)
75 |
76 | ### [Milestone 2.2.0](https://github.com/npgsql/EFCore.PG/issues?q=is%3Aissue+milestone%3A2.2.0)
77 |
78 | Contributor | Assigned issues
79 | -------------------------------------------------- | ----------------:|
80 | [@austindrenski](https://github.com/austindrenski) | [15](https://github.com/npgsql/EFCore.PG/issues?q=is%3Aissue+milestone%3A2.2.0+is%3Aclosed+assignee%3Aaustindrenski)
81 | [@roji](https://github.com/roji) | [8](https://github.com/npgsql/EFCore.PG/issues?q=is%3Aissue+milestone%3A2.2.0+is%3Aclosed+assignee%3Aroji)
82 |
--------------------------------------------------------------------------------
/static/LegacyDateAndTimeResolverFactory.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using Npgsql.Internal;
3 | using Npgsql.Internal.Postgres;
4 | using NpgsqlTypes;
5 |
6 | sealed class LegacyDateAndTimeResolverFactory : PgTypeInfoResolverFactory
7 | {
8 | public override IPgTypeInfoResolver CreateResolver() => new Resolver();
9 | public override IPgTypeInfoResolver CreateArrayResolver() => new ArrayResolver();
10 | public override IPgTypeInfoResolver CreateRangeResolver() => new RangeResolver();
11 | public override IPgTypeInfoResolver CreateRangeArrayResolver() => new RangeArrayResolver();
12 | public override IPgTypeInfoResolver CreateMultirangeResolver() => new MultirangeResolver();
13 | public override IPgTypeInfoResolver CreateMultirangeArrayResolver() => new MultirangeArrayResolver();
14 |
15 | const string Date = "pg_catalog.date";
16 | const string Time = "pg_catalog.time";
17 | const string DateRange = "pg_catalog.daterange";
18 | const string DateMultirange = "pg_catalog.datemultirange";
19 |
20 | class Resolver : IPgTypeInfoResolver
21 | {
22 | TypeInfoMappingCollection? _mappings;
23 | protected TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new());
24 |
25 | public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options)
26 | => type == typeof(object) ? Mappings.Find(type, dataTypeName, options) : null;
27 |
28 | static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings)
29 | {
30 | mappings.AddStructType(Date,
31 | static (options, mapping, _) => options.GetTypeInfo(typeof(DateTime), new DataTypeName(mapping.DataTypeName))!,
32 | matchRequirement: MatchRequirement.DataTypeName);
33 |
34 | mappings.AddStructType(Time,
35 | static (options, mapping, _) => options.GetTypeInfo(typeof(TimeSpan), new DataTypeName(mapping.DataTypeName))!,
36 | isDefault: true);
37 |
38 | return mappings;
39 | }
40 | }
41 |
42 | sealed class ArrayResolver : Resolver, IPgTypeInfoResolver
43 | {
44 | TypeInfoMappingCollection? _mappings;
45 | new TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(base.Mappings));
46 |
47 | public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options)
48 | => type == typeof(object) ? Mappings.Find(type, dataTypeName, options) : null;
49 |
50 | static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings)
51 | {
52 | mappings.AddStructArrayType(Date);
53 | mappings.AddStructArrayType(Time);
54 |
55 | return mappings;
56 | }
57 | }
58 |
59 | class RangeResolver : IPgTypeInfoResolver
60 | {
61 | TypeInfoMappingCollection? _mappings;
62 | protected TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new());
63 |
64 | public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options)
65 | => type == typeof(object) ? Mappings.Find(type, dataTypeName, options) : null;
66 |
67 | static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings)
68 | {
69 | mappings.AddStructType>(DateRange,
70 | static (options, mapping, _) => options.GetTypeInfo(typeof(NpgsqlRange), new DataTypeName(mapping.DataTypeName))!,
71 | matchRequirement: MatchRequirement.DataTypeName);
72 |
73 | return mappings;
74 | }
75 | }
76 |
77 | sealed class RangeArrayResolver : RangeResolver, IPgTypeInfoResolver
78 | {
79 | TypeInfoMappingCollection? _mappings;
80 | new TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(base.Mappings));
81 |
82 | public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options)
83 | => type == typeof(object) ? Mappings.Find(type, dataTypeName, options) : null;
84 |
85 | static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings)
86 | {
87 | mappings.AddStructArrayType>(DateRange);
88 |
89 | return mappings;
90 | }
91 | }
92 |
93 | class MultirangeResolver : IPgTypeInfoResolver
94 | {
95 | TypeInfoMappingCollection? _mappings;
96 | protected TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new());
97 |
98 | public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options)
99 | => type == typeof(object) ? Mappings.Find(type, dataTypeName, options) : null;
100 |
101 | static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings)
102 | {
103 | mappings.AddType[]>(DateMultirange,
104 | static (options, mapping, _) => options.GetTypeInfo(typeof(NpgsqlRange[]), new DataTypeName(mapping.DataTypeName))!,
105 | matchRequirement: MatchRequirement.DataTypeName);
106 |
107 | return mappings;
108 | }
109 | }
110 |
111 | sealed class MultirangeArrayResolver : MultirangeResolver, IPgTypeInfoResolver
112 | {
113 | TypeInfoMappingCollection? _mappings;
114 | new TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(base.Mappings));
115 |
116 | public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options)
117 | => type == typeof(object) ? Mappings.Find(type, dataTypeName, options) : null;
118 |
119 | static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings)
120 | {
121 | mappings.AddArrayType[]>(DateMultirange);
122 |
123 | return mappings;
124 | }
125 | }
126 | }
127 |
--------------------------------------------------------------------------------
/conceptual/EFCore.PG/misc/collations-and-case-sensitivity.md:
--------------------------------------------------------------------------------
1 | # Collations and Case Sensitivity
2 |
3 | > It's recommended that you start by reading [the general Entity Framework Core docs on collations and case sensitivity](https://docs.microsoft.com/ef/core/miscellaneous/collations-and-case-sensitivity).
4 |
5 | PostgreSQL is a case-sensitive database by default, but provides various possibilities for performing case-insensitive operations and working with collations. Depending on your PostgreSQL version, full collation support may be somewhat incomplete, so you may need to carefully review your options below and pick the one which suits you.
6 |
7 | ## PostgreSQL collations
8 |
9 | PostgreSQL has full support for managing collations, and this is the recommended way for managing textual comparisons and sorting. However, note that support for non-deterministic collations - covering case-insensitivity - was relatively limited in some ways. While recent versions such as PostgreSQL 18 support case-insensitive collations quite well, previous versions did not (for example, it was not possible to perform pattern matching via LIKE on columns with a non-deterministic collation). Read more about PostgreSQL collation support [in the documentation](https://www.postgresql.org/docs/current/collation.html).
10 |
11 | ### Creating a collation
12 |
13 | In PostgreSQL, collations are first-class, named database objects which can be created and dropped, just like tables. To create a collation, place the following in your context's `OnModelCreating`:
14 |
15 | ```csharp
16 | modelBuilder.HasCollation("my_collation", locale: "en-u-ks-primary", provider: "icu", deterministic: false);
17 | ```
18 |
19 | This creates a collation with the name `my_collation`: this is an arbitrary name you can choose, which you will be specifying later when assigning the collation to columns. The rest of the parameters instruct PostgreSQL to create a non-deterministic, case-insensitive ICU collation. ICU collations are very powerful, and allow you to specify precise rules with regards to case, accents and other textual aspects. Consult [the ICU docs](https://unicode-org.github.io/icu/userguide/collation/) for more information on supported features and keywords.
20 |
21 | ### Column collation
22 |
23 | Once a collation has been created in your database, you can specify it on columns:
24 |
25 | ```csharp
26 | protected override void OnModelCreating(ModelBuilder modelBuilder)
27 | {
28 | modelBuilder.HasCollation("my_collation", locale: "en-u-ks-primary", provider: "icu", deterministic: false);
29 |
30 | modelBuilder.Entity<Customer>().Property(c => c.Name)
31 | .UseCollation("my_collation");
32 | }
33 | ```
34 |
35 | This will cause all textual operators on this column to be case-insensitive.
36 |
37 | ### Database collation
38 |
39 | PostgreSQL also allows you to specify collations at the database level, when it is created:
40 |
41 | ```csharp
42 | protected override void OnModelCreating(ModelBuilder modelBuilder)
43 | {
44 | modelBuilder.UseCollation("");
45 | }
46 | ```
47 |
48 | Unfortunately, the database collation is quite limited in PostgreSQL; it notably does not support non-deterministic collations (e.g. case-insensitive ones). To work around this limitation, you can use EF Core's [pre-convention model configuration](https://docs.microsoft.com/ef/core/modeling/bulk-configuration#pre-convention-configuration) feature:
49 |
50 | ```csharp
51 | protected override void ConfigureConventions(ModelConfigurationBuilder configurationBuilder)
52 | {
53 | configurationBuilder.Properties<string>().UseCollation("my_collation");
54 | }
55 | ```
56 |
57 | All columns created with this configuration will automatically have their collation specified accordingly, and all existing columns will be altered. The end result of the above is very similar to specifying a database collation: instead of telling PostgreSQL to implicitly apply a collation to all columns, EF Core will do the same for you in its migrations.
58 |
59 | ## The citext type
60 |
61 | The older PostgreSQL method for performing case-insensitive text operations is the `citext` type; it is similar to the `text` type, but operators and functions between `citext` values are implicitly case-insensitive. Use this type only if case-insensitive collations (see above) are insufficient for your purposes. [The PostgreSQL docs](https://www.postgresql.org/docs/current/citext.html) provide more information on this type.
62 |
63 | `citext` is available in a PostgreSQL-bundled extension, so you'll first have to install it:
64 |
65 | ```csharp
66 | modelBuilder.HasPostgresExtension("citext");
67 | ```
68 |
69 | Specifying that a column should use `citext` is simply a matter of setting the column's type:
70 |
71 | ### [Data Annotations](#tab/data-annotations)
72 |
73 | ```csharp
74 | public class Blog
75 | {
76 | public int Id { get; set; }
77 | [Column(TypeName = "citext")]
78 | public string Name { get; set; }
79 | }
80 | ```
81 |
82 | ### [Fluent API](#tab/fluent-api)
83 |
84 | ```csharp
85 | protected override void OnModelCreating(ModelBuilder modelBuilder)
86 | {
87 | modelBuilder.Entity<Blog>().Property(b => b.Name)
88 | .HasColumnType("citext");
89 | }
90 | ```
91 |
92 | ***
93 |
94 | Some limitations (others are listed in [the PostgreSQL docs](https://www.postgresql.org/docs/current/citext.html)):
95 |
96 | * While `citext` allows case-insensitive comparisons, it doesn't handle other aspects of collations, such as accents.
97 | * Several PostgreSQL text functions are overloaded to work with `citext` as expected, but others aren't. Using a function that isn't overloaded will result in a regular, case-sensitive match.
98 | * Unlike collations, `citext` does not allow the same column to be compared case-sensitively in some queries, and insensitively in others.
99 |
100 | ## ILIKE
101 |
102 | `ILIKE` is a PostgreSQL-specific operator that works just like `LIKE`, but is case-insensitive. If you only need to perform case-insensitive `LIKE` pattern matching, then this could be sufficient. The provider exposes this via `EF.Functions.ILike`:
103 |
104 | ```csharp
105 | var results = ctx.Blogs
106 | .Where(b => EF.Functions.ILike(b.Name, "a%b"))
107 | .ToList();
108 | ```
109 |
--------------------------------------------------------------------------------
/conceptual/EFCore.PG/mapping/enum.md:
--------------------------------------------------------------------------------
1 | # Enum Type Mapping
2 |
3 | By default, any enum properties in your model will be mapped to database integers. EF Core 2.1 also allows you to map these to strings in the database with value converters.
4 |
5 | However, the Npgsql provider also allows you to map your CLR enums to [database enum types](https://www.postgresql.org/docs/current/static/datatype-enum.html). This option, unique to PostgreSQL, provides the best of both worlds: the enum is internally stored in the database as a number (minimal storage), but is handled like a string (more usable, no need to remember numeric values) and has type safety.
6 |
7 | ## Setting up your enum with EF
8 |
9 | > [!NOTE]
10 | > Enum mapping has changed considerably in EF 9.0.
11 |
12 | If you're using EF 9.0 or above, simply call `MapEnum` inside your `UseNpgsql` invocation.
13 |
14 | ### [With a connection string](#tab/with-connection-string)
15 |
16 | If you're passing a connection string to `UseNpgsql`, simply add the `MapEnum` call as follows:
17 |
18 | ```csharp
19 | builder.Services.AddDbContext<MyDbContext>(options => options.UseNpgsql(
20 | "<connection string>",
21 | o => o.MapEnum<Mood>("mood")));
22 | ```
23 |
24 | This configures all aspects of Npgsql to use your `Mood` enum - both at the EF and the lower-level Npgsql layer - and ensures that the enum is created in the database in EF migrations.
25 |
26 | ### [With an external NpgsqlDataSource](#tab/with-external-datasource)
27 |
28 | If you're creating an external NpgsqlDataSource and passing it to `UseNpgsql`, you must make sure to map your enum on that data source independently of the EF-level setup:
29 |
30 | ```csharp
31 | var dataSourceBuilder = new NpgsqlDataSourceBuilder("<connection string>");
32 | dataSourceBuilder.MapEnum<Mood>();
33 | var dataSource = dataSourceBuilder.Build();
34 |
35 | builder.Services.AddDbContext<MyDbContext>(options => options.UseNpgsql(
36 | dataSource,
37 | o => o.MapEnum<Mood>("mood")));
38 | ```
39 |
40 | ***
41 |
42 | ### Older EF versions
43 |
44 | On versions of EF prior to 9.0, enum setup is more involved and consists of several steps; enum mapping has to be done at the lower-level Npgsql layer, and also requires explicit configuration in the EF model for creation in the database via migrations.
45 |
46 | #### Creating your database enum
47 |
48 | First, you must specify the PostgreSQL enum type on your model, just like you would with tables, sequences or other databases objects:
49 |
50 | ```csharp
51 | protected override void OnModelCreating(ModelBuilder builder)
52 | => builder.HasPostgresEnum<Mood>();
53 | ```
54 |
55 | This causes the EF Core provider to create your enum type, `mood`, with two labels: `happy` and `sad`. This will cause the appropriate migration to be created.
56 |
57 | #### Mapping your enum
58 |
59 | Even if your database enum is created, Npgsql has to know about it, and especially about your CLR enum type that should be mapped to it:
60 |
61 | ##### [NpgsqlDataSource](#tab/with-datasource)
62 |
63 | Since version 7.0, NpgsqlDataSource is the recommended way to use Npgsql. When using NpgsqlDataSource, map your enum when building your data source:
64 |
65 | ```csharp
66 | // Call MapEnum() when building your data source:
67 | var dataSourceBuilder = new NpgsqlDataSourceBuilder(/* connection string */);
68 | dataSourceBuilder.MapEnum<Mood>();
69 | var dataSource = dataSourceBuilder.Build();
70 |
71 | builder.Services.AddDbContext<MyDbContext>(options => options.UseNpgsql(dataSource));
72 | ```
73 |
74 | ##### [Without NpgsqlDatasource](#tab/without-datasource)
75 |
76 | Since version 7.0, NpgsqlDataSource is the recommended way to use Npgsql. However, if you're not yet using NpgsqlDataSource, map enums by adding the following code, *before* any EF Core operations take place. An appropriate place for this is in the static constructor on your DbContext class:
77 |
78 | ```csharp
79 | static MyDbContext()
80 |     => NpgsqlConnection.GlobalTypeMapper.MapEnum<Mood>();
81 | ```
82 |
83 | > [!NOTE]
84 | > If you have multiple context types, all `MapEnum` invocations must be done before *any* of them is used; this means that the code cannot be in your static constructors, but must be moved to the program start.
85 |
86 | ***
87 |
88 | This code lets Npgsql know that your CLR enum type, `Mood`, should be mapped to a database enum called `mood`. Note that if your enum is in a custom schema (not `public`), you must specify that schema in the call to `MapEnum`.
89 |
90 | ## Using enum properties
91 |
92 | Once your enum is properly set up with EF, you can use your CLR enum type just like any other property:
93 |
94 | ```csharp
95 | public class Blog
96 | {
97 | public int Id { get; set; }
98 | public Mood Mood { get; set; }
99 | }
100 |
101 | using (var ctx = new MyDbContext())
102 | {
103 | // Insert
104 | ctx.Blogs.Add(new Blog { Mood = Mood.Happy });
105 | ctx.Blogs.SaveChanges();
106 |
107 | // Query
108 | var blog = ctx.Blogs.Single(b => b.Mood == Mood.Happy);
109 | }
110 | ```
111 |
112 | ## Altering enum definitions
113 |
114 | The Npgsql provider only allows adding new values to existing enums, and the appropriate migrations will be automatically created as you add values to your CLR enum type. However, PostgreSQL itself doesn't support removing enum values (since these may be in use), and while renaming values is supported, it isn't automatically done by the provider to avoid using unreliable detection heuristics. Renaming an enum value can be done by including [raw SQL](https://docs.microsoft.com/en-us/ef/core/managing-schemas/migrations/managing?tabs=dotnet-core-cli#arbitrary-changes-via-raw-sql) in your migrations as follows:
115 |
116 | ```csharp
117 | migrationBuilder.Sql("ALTER TYPE mood RENAME VALUE 'happy' TO 'thrilled';");
118 | ```
119 |
120 | As always, test your migrations carefully before running them on production databases.
121 |
122 | ## Scaffolding from an existing database
123 |
124 | If you're creating your model from an existing database, the provider will recognize enums in your database, and scaffold the appropriate `HasPostgresEnum()` lines in your model. However, the scaffolding process has no knowledge of your CLR type, and will therefore skip your enum columns (warnings will be logged). You will have to create the CLR type and perform the proper setup as described above.
125 |
126 | In the future it may be possible to scaffold the actual enum type (and with it the properties), but this isn't supported at the moment.
127 |
--------------------------------------------------------------------------------
/conceptual/EFCore.PG/mapping/array.md:
--------------------------------------------------------------------------------
1 | # Array Type Mapping
2 |
3 | PostgreSQL has the unique feature of supporting [*array data types*](https://www.postgresql.org/docs/current/static/arrays.html). This allows you to conveniently and efficiently store several values in a single column, where in other databases you'd typically resort to concatenating the values in a string or defining another table with a one-to-many relationship.
4 |
5 | > [!NOTE]
6 | > Although PostgreSQL supports multidimensional arrays, these aren't yet supported by the EF Core provider.
7 |
8 | ## Mapping arrays
9 |
10 | Simply define a regular .NET array or `List<>` property:
11 |
12 | ```csharp
13 | public class Post
14 | {
15 | public int Id { get; set; }
16 | public string Name { get; set; }
17 | public string[] Tags { get; set; }
18 |     public List<string> AlternativeTags { get; set; }
19 | }
20 | ```
21 |
22 | The provider will create `text[]` columns for the above two properties, and will properly detect changes in them - if you load an array and change one of its elements, calling `SaveChanges` will automatically update the row in the database accordingly.
23 |
24 | ## Operation translation
25 |
26 | The provider can also translate CLR array operations to the corresponding SQL operation; this allows you to efficiently work with arrays by evaluating operations in the database and avoids pulling all the data. The following table lists the array operations that currently get translated; all these translations work both for .NET arrays (`int[]`) and for generic Lists (`List<int>`). If you run into a missing operation, please open an issue.
27 |
28 | .NET | SQL | Notes
29 | --------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------- | ----
30 | array[0] | [array\[1\]](https://www.postgresql.org/docs/current/arrays.html#ARRAYS-ACCESSING) |
31 | array.Length / list.Count | [cardinality(array)](https://www.postgresql.org/docs/current/static/functions-array.html#ARRAY-FUNCTIONS-TABLE) |
32 | array.Skip(2) | [array\[3,\]](https://www.postgresql.org/docs/current/arrays.html#ARRAYS-ACCESSING) | Added in 8.0
33 | array.Take(2) | [array\[,2\]](https://www.postgresql.org/docs/current/arrays.html#ARRAYS-ACCESSING) | Added in 8.0
34 | array.Skip(1).Take(2) | [array\[2,3\]](https://www.postgresql.org/docs/current/arrays.html#ARRAYS-ACCESSING) | Added in 8.0
35 | array1 == array2 | [array1 = array2](https://www.postgresql.org/docs/current/static/arrays.html) |
36 | array1.SequenceEqual(array2) | [array1 = array2](https://www.postgresql.org/docs/current/static/arrays.html) |
37 | arrayNonColumn.Contains(element) | [element = ANY(arrayNonColumn)](https://www.postgresql.org/docs/current/static/functions-comparisons.html#AEN21104) | Can use regular index
38 | arrayColumn.Contains(element) | [arrayColumn @> ARRAY\[element\]](https://www.postgresql.org/docs/current/functions-array.html#ARRAY-OPERATORS-TABLE) | Can use GIN index
39 | array.Append(element) | [array_append(array, element)](https://www.postgresql.org/docs/current/functions-array.html#ARRAY-FUNCTIONS-TABLE) | Added in 6.0
40 | array.Where(i => i != 8) | [array_remove(array, value)](https://www.postgresql.org/docs/current/functions-array.html#ARRAY-FUNCTIONS-TABLE) | Added in 9.0
41 | array1.Concat(array2) | [array1 \|\| array2](https://www.postgresql.org/docs/current/functions-array.html#ARRAY-FUNCTIONS-TABLE) | Added in 6.0
42 | array.IndexOf(element) | [array_position(array, element) - 1](https://www.postgresql.org/docs/current/functions-array.html#ARRAY-FUNCTIONS-TABLE) | Added in 6.0
43 | array.IndexOf(element, startIndex) | [array_position(array, element, startIndex + 1) - 1](https://www.postgresql.org/docs/current/functions-array.html#ARRAY-FUNCTIONS-TABLE) | Added in 6.0
44 | String.Join(separator, array) | [array_to_string(array, separator, '')](https://www.postgresql.org/docs/current/functions-array.html#ARRAY-FUNCTIONS-TABLE) | Added in 6.0
45 | array.Any() | [cardinality(array) > 0](https://www.postgresql.org/docs/current/static/functions-array.html#ARRAY-FUNCTIONS-TABLE) |
46 | array1.Intersect(array2).Any() | [array1 && array2](https://www.postgresql.org/docs/current/functions-array.html#ARRAY-OPERATORS-TABLE) | Added in 8.0
47 | array1.Any(i => array2.Contains(i)) | [array1 && array2](https://www.postgresql.org/docs/current/functions-array.html#ARRAY-OPERATORS-TABLE) |
48 | array1.All(i => array2.Contains(i)) | [array1 <@ array2](https://www.postgresql.org/docs/current/functions-array.html#ARRAY-OPERATORS-TABLE) |
49 | array.Any(s => EF.Functions.Like(string, s)) | [string LIKE ANY (array)](https://www.postgresql.org/docs/current/functions-comparisons.html#id-1.5.8.30.16) |
50 | array.Any(s => EF.Functions.ILike(string, s)) | [string ILIKE ANY (array)](https://www.postgresql.org/docs/current/functions-comparisons.html#id-1.5.8.30.16) |
51 | array.All(s => EF.Functions.Like(string, s)) | [string LIKE ALL (array)](https://www.postgresql.org/docs/current/functions-comparisons.html#id-1.5.8.30.16) |
52 | array.All(s => EF.Functions.ILike(string, s)) | [string ILIKE ALL (array)](https://www.postgresql.org/docs/current/functions-comparisons.html#id-1.5.8.30.16) |
53 | EF.Functions.ArrayAgg(values) | [array_agg(values)](https://www.postgresql.org/docs/current/functions-aggregate.html#FUNCTIONS-AGGREGATE-TABLE) | Added in 7.0, See [Aggregate functions](translations.md#aggregate-functions).
54 |
--------------------------------------------------------------------------------
/conceptual/Npgsql/types/nts.md:
--------------------------------------------------------------------------------
1 | # PostGIS/NetTopologySuite Type Plugin
2 |
3 | PostgreSQL supports spatial data and operations via [the PostGIS extension](https://postgis.net/), which is a mature and feature-rich database spatial implementation. .NET doesn't provide a standard spatial library, but [NetTopologySuite](https://github.com/NetTopologySuite/NetTopologySuite) is a leading spatial library. Npgsql has a plugin which allows you to map the NTS types to PostGIS columns, and even translate many useful spatial operations to SQL. This is the recommended way to interact with spatial types in Npgsql.
4 |
5 | PostgreSQL provides support for spatial types (geometry/geography) via the powerful [PostGIS](https://postgis.net/) extension; this allows you to store points and other spatial constructs in the database, and efficiently perform operations and searches on them. Npgsql supports the PostGIS types via [NetTopologySuite](https://github.com/NetTopologySuite/NetTopologySuite), which is the leading spatial library in the .NET world: the NTS types can be read and written directly to their corresponding PostGIS types. This is the recommended way to work with spatial types in Npgsql.
6 |
7 | ## Setup
8 |
9 | To avoid forcing a dependency on the NetTopologySuite library for users not using spatial, NTS support is delivered as a separate plugin. To use the plugin, simply add a dependency on [Npgsql.NetTopologySuite](https://www.nuget.org/packages/Npgsql.NetTopologySuite) and set it up in one of the following ways:
10 |
11 | ### [NpgsqlDataSource](#tab/datasource)
12 |
13 | > [!NOTE]
14 | > `NpgsqlDataSource` was introduced in Npgsql 7.0, and is the recommended way to manage type mapping. If you're using an older version, see the other methods.
15 |
16 | ```csharp
17 | var dataSourceBuilder = new NpgsqlDataSourceBuilder(...);
18 | dataSourceBuilder.UseNetTopologySuite();
19 | await using var dataSource = dataSourceBuilder.Build();
20 | ```
21 |
22 | ### [Global mapping](#tab/global)
23 |
24 | If you're using an older version of Npgsql which doesn't yet support `NpgsqlDataSource`, you can configure mappings globally for all connections in your application:
25 |
26 | ```csharp
27 | NpgsqlConnection.GlobalTypeMapper.UseNetTopologySuite();
28 | ```
29 |
30 | For this to work, you must place this code at the beginning of your application, before any other Npgsql API is called. Note that in Npgsql 7.0, global type mappings are obsolete (but still supported) - `NpgsqlDataSource` is the recommended way to manage type mappings.
31 |
32 | ### [Connection mapping](#tab/connection)
33 |
34 | > [!NOTE]
35 | > This mapping method has been removed in Npgsql 7.0.
36 |
37 | Older versions of Npgsql supported configuring a type mapping on an individual connection, as follows:
38 |
39 | ```csharp
40 | var conn = new NpgsqlConnection(...);
41 | conn.TypeMapper.UseNetTopologySuite();
42 | ```
43 |
44 | ***
45 |
46 | By default the plugin handles only ordinates provided by the `DefaultCoordinateSequenceFactory` of `GeometryServiceProvider.Instance`. If `GeometryServiceProvider` is initialized automatically the X and Y ordinates are handled. To change the behavior specify the `handleOrdinates` parameter like in the following example:
47 |
48 | ```csharp
49 | dataSourceBuilder.UseNetTopologySuite(handleOrdinates: Ordinates.XYZ);
50 | ```
51 |
52 | To process the M ordinate, you must initialize `GeometryServiceProvider.Instance` to a new `NtsGeometryServices` instance with `coordinateSequenceFactory` set to a `DotSpatialAffineCoordinateSequenceFactory`. Or you can specify the factory when calling `UseNetTopologySuite`.
53 |
54 | ```csharp
55 | // Place this at the beginning of your program to use the specified settings everywhere (recommended)
56 | GeometryServiceProvider.Instance = new NtsGeometryServices(
57 | new DotSpatialAffineCoordinateSequenceFactory(Ordinates.XYM),
58 | new PrecisionModel(PrecisionModels.Floating),
59 | -1);
60 |
61 | // Or specify settings for Npgsql only
62 | dataSourceBuilder.UseNetTopologySuite(
63 | new DotSpatialAffineCoordinateSequenceFactory(Ordinates.XYM));
64 | ```
65 |
66 | ## Reading and Writing Geometry Values
67 |
68 | When reading PostGIS values from the database, Npgsql will automatically return the appropriate NetTopologySuite types: `Point`, `LineString`, and so on. Npgsql will also automatically recognize NetTopologySuite's types in parameters, and will automatically send the corresponding PostGIS type to the database. The following code demonstrates a roundtrip of a NetTopologySuite `Point` to the database:
69 |
70 | ```csharp
71 | await conn.ExecuteNonQueryAsync("CREATE TEMP TABLE data (geom GEOMETRY)");
72 |
73 | await using (var cmd = new NpgsqlCommand("INSERT INTO data (geom) VALUES ($1)", conn))
74 | {
75 | cmd.Parameters.Add(new() { Value = new Point(new Coordinate(1d, 1d)) });
76 | await cmd.ExecuteNonQueryAsync();
77 | }
78 |
79 | await using (var cmd = new NpgsqlCommand("SELECT geom FROM data", conn))
80 | await using (var reader = await cmd.ExecuteReaderAsync())
81 | {
82 | await reader.ReadAsync();
83 |     var point = reader.GetFieldValue<Point>(0);
84 | }
85 | ```
86 |
87 | You may also explicitly specify a parameter's type by setting `NpgsqlDbType.Geometry`.
88 |
89 | ## Geography (geodetic) Support
90 |
91 | PostGIS has two types: `geometry` (for Cartesian coordinates) and `geography` (for geodetic or spherical coordinates). You can read about the geometry/geography distinction [in the PostGIS docs](https://postgis.net/docs/manual-2.4/using_postgis_dbmanagement.html#PostGIS_Geography) or in [this blog post](http://workshops.boundlessgeo.com/postgis-intro/geography.html). In a nutshell, `geography` is much more accurate when doing calculations over long distances, but is more expensive computationally and supports only a small subset of the spatial operations supported by `geometry`.
92 |
93 | Npgsql uses the same NetTopologySuite types to represent both `geometry` and `geography` - the `Point` type represents a point in either Cartesian or geodetic space. You usually don't need to worry about this distinction because PostgreSQL will usually cast types back and forth as needed. However, it's worth noting that Npgsql sends Cartesian `geometry` by default, because that's the usual requirement. You have the option of telling Npgsql to send `geography` instead by specifying `NpgsqlDbType.Geography`:
94 |
95 | ```csharp
96 | await using (var cmd = new NpgsqlCommand("INSERT INTO data (geog) VALUES ($1)", conn))
97 | {
98 | cmd.Parameters.Add(new() { Value = point, NpgsqlDbType = NpgsqlDbType.Geography });
99 | await cmd.ExecuteNonQueryAsync();
100 | }
101 | ```
102 |
103 | If you prefer to use `geography` everywhere by default, you can also specify that when setting up the plugin:
104 |
105 | ```csharp
106 | dataSourceBuilder.UseNetTopologySuite(geographyAsDefault: true);
107 | ```
108 |
--------------------------------------------------------------------------------
/conceptual/Npgsql/types/enums_and_composites.md:
--------------------------------------------------------------------------------
1 | # PostgreSQL enums and composites
2 |
3 | PostgreSQL supports [enum types](http://www.postgresql.org/docs/current/static/datatype-enum.html) and [composite types](http://www.postgresql.org/docs/current/static/rowtypes.html) as database columns, and Npgsql supports reading and writing these. This allows you to seamlessly read and write enum and composite values to the database without worrying about conversions.
4 |
5 | ## Creating your types
6 |
7 | Let's assume you've created some enum and composite types in PostgreSQL:
8 |
9 | ```sql
10 | CREATE TYPE mood AS ENUM ('sad', 'ok', 'happy');
11 |
12 | CREATE TYPE inventory_item AS (
13 | name text,
14 | supplier_id integer,
15 | price numeric
16 | );
17 | ```
18 |
19 | To use these types with Npgsql, you must first define corresponding CLR types that will be mapped to the PostgreSQL types:
20 |
21 | ```csharp
22 | public enum Mood
23 | {
24 | Sad,
25 | Ok,
26 | Happy
27 | }
28 |
29 | public class InventoryItem
30 | {
31 | public string Name { get; set; } = "";
32 | public int SupplierId { get; set; }
33 | public decimal Price { get; set; }
34 | }
35 | ```
36 |
37 | ## Mapping your CLR types
38 |
39 | Once your types are defined both in PostgreSQL and in C#, you can now configure the mapping between them with Npgsql.
40 |
41 | ### [NpgsqlDataSource](#tab/datasource)
42 |
43 | > [!NOTE]
44 | > `NpgsqlDataSource` was introduced in Npgsql 7.0, and is the recommended way to manage type mapping. If you're using an older version, see the other methods.
45 |
46 | ```csharp
47 | var dataSourceBuilder = new NpgsqlDataSourceBuilder(...);
48 | dataSourceBuilder.MapEnum<Mood>();
49 | dataSourceBuilder.MapComposite<InventoryItem>();
50 | await using var dataSource = dataSourceBuilder.Build();
51 | ```
52 |
53 | ### [Global mapping](#tab/global)
54 |
55 | If you're using an older version of Npgsql which doesn't yet support `NpgsqlDataSource`, you can configure mappings globally for all connections in your application:
56 |
57 | ```csharp
58 | NpgsqlConnection.GlobalTypeMapper.MapEnum<Mood>();
59 | NpgsqlConnection.GlobalTypeMapper.MapComposite<InventoryItem>();
60 | ```
61 |
62 | For this to work, you must place this code at the beginning of your application, before any other Npgsql API is called. Note that in Npgsql 7.0, global type mappings are obsolete (but still supported) - `NpgsqlDataSource` is the recommended way to manage type mappings.
63 |
64 | ### [Connection mapping](#tab/connection)
65 |
66 | > [!NOTE]
67 | > This mapping method has been removed in Npgsql 7.0.
68 |
69 | Older versions of Npgsql supported configuring a type mapping on an individual connection, as follows:
70 |
71 | ```csharp
72 | var conn = new NpgsqlConnection(...);
73 | conn.TypeMapper.MapEnum<Mood>();
74 | conn.TypeMapper.MapComposite<InventoryItem>();
75 | ```
76 |
77 | ***
78 |
79 | Whatever the method used, your CLR types `Mood` and `InventoryItem` are now mapped to the PostgreSQL types `mood` and `inventory_item`.
80 |
81 | ## Using your mapped types
82 |
83 | Once your mapping is in place, you can read and write your CLR types as usual:
84 |
85 | ```csharp
86 | // Writing
87 | await using (var cmd = new NpgsqlCommand("INSERT INTO some_table (my_enum, my_composite) VALUES ($1, $2)", conn))
88 | {
89 | cmd.Parameters.Add(new() { Value = Mood.Happy });
90 | cmd.Parameters.Add(new()
91 | {
92 | Value = new InventoryItem { ... }
93 | });
94 | cmd.ExecuteNonQuery();
95 | }
96 |
97 | // Reading
98 | await using (var cmd = new NpgsqlCommand("SELECT my_enum, my_composite FROM some_table", conn))
99 | await using (var reader = cmd.ExecuteReader()) {
100 | reader.Read();
101 |     var enumValue = reader.GetFieldValue<Mood>(0);
102 |     var compositeValue = reader.GetFieldValue<InventoryItem>(1);
103 | }
104 | ```
105 |
106 | Note that your PostgreSQL enum and composite types (`mood` and `inventory_item` in the sample above) must be defined in your database before the first connection is created (see `CREATE TYPE`). If you're creating PostgreSQL types within your program, call `NpgsqlConnection.ReloadTypes()` to make sure Npgsql becomes properly aware of them.
107 |
108 | [When preparing commands](../prepare.md), you need to specify the database type of parameters without specifying an actual parameter value. This is usually done via the `NpgsqlDbType` enum; but since enums and composites are user-defined types, they have no labels in that enum. Instead, set the `DataTypeName` property on the parameter to the name of your PostgreSQL type ([see these docs for more information on parameter types](../basic-usage.md#parameter-types)).
109 |
110 | ## Name translation
111 |
112 | CLR type and field names are usually Pascal case (e.g. `InventoryData`), whereas in PostgreSQL they are snake case (e.g. `inventory_data`). To help make the mapping for enums and composites seamless, pluggable name translators are used to translate all names. The default translation scheme is `NpgsqlSnakeCaseNameTranslator`, which maps names like `SomeType` to `some_type`, but you can specify others. The default name translator can be set for all your connections via `NpgsqlConnection.GlobalTypeMapper.DefaultNameTranslator`, or for a specific connection via `NpgsqlConnection.TypeMapper.DefaultNameTranslator`. You also have the option of specifying a name translator when setting up a mapping:
113 |
114 | ```csharp
115 | NpgsqlConnection.GlobalTypeMapper.MapComposite<InventoryData>("inventory_data", new NpgsqlNullNameTranslator());
116 | ```
117 |
118 | Finally, you may control mappings on a field-by-field basis via the `[PgName]` attribute. This overrides the name translator.
119 |
120 | ```csharp
121 | public enum Mood
122 | {
123 | [PgName("depressed")]
124 | Sad,
125 | Ok,
126 | [PgName("ebullient")]
127 | Happy
128 | }
129 | ```
130 |
131 | ## Reading and writing unmapped enums
132 |
133 | In some cases, it may be desirable to interact with PostgreSQL enums without a pre-existing CLR enum type - this is useful mainly if your program doesn't know the database schema and types in advance, and needs to interact with any enum/composite type.
134 |
135 | Npgsql allows reading and writing enums as simple strings:
136 |
137 | ```csharp
138 | // Writing enum as string
139 | await using (var cmd = new NpgsqlCommand("INSERT INTO some_table (my_enum) VALUES ($1)", conn))
140 | {
141 | cmd.Parameters.Add(new()
142 | {
143 |         Value = "Happy",
144 |         DataTypeName = "mood"
145 | });
146 | cmd.ExecuteNonQuery();
147 | }
148 |
149 | // Reading enum as string
150 | await using (var cmd = new NpgsqlCommand("SELECT my_enum FROM some_table", conn))
151 | await using (var reader = cmd.ExecuteReader()) {
152 | reader.Read();
153 |     var enumValue = reader.GetFieldValue<string>(0);
154 | }
155 | ```
156 |
--------------------------------------------------------------------------------
/conceptual/Npgsql/performance.md:
--------------------------------------------------------------------------------
1 | # Performance
2 |
3 | ## Diagnostics
4 |
5 | To be able to improve performance, you first need to be able to see which queries are slow, and generally observe how your application is behaving. PostgreSQL provide some powerful features for knowing what's going on in the database; [the statistics collector](https://www.postgresql.org/docs/current/monitoring-stats.html) is a good place to start, and in particular the `pg_stat_activity` table, which shows which queries are being executed at any given point.
6 |
7 | Beyond PostgreSQL, Npgsql provides its own set of diagnostics features for tracing, logging and producing metrics. Tracing and metrics are particularly useful for performance analysis - consider collecting this data continuously on your production platform. These features are documented in the dedicated [diagnostics page](diagnostics/overview.md).
8 |
9 | ## Prepared Statements
10 |
11 | One of the most important (and easy) ways to improve your application's performance is to prepare your commands. Even if you're not coding against ADO.NET directly (e.g. using Dapper or an O/RM), Npgsql has an automatic preparation feature which allows you to benefit from the performance gains associated with prepared statements. [See this blog post](http://www.roji.org/prepared-statements-in-npgsql-3-2) and/or [the documentation](prepare.md) for more details.
12 |
13 | ## Batching/Pipelining
14 |
15 | When you execute a command, Npgsql executes a roundtrip to the database. If you execute multiple commands (say, inserting 3 rows or performing multiple selects), you're executing multiple roundtrips; each command has to complete before the next command can start execution. Depending on your network latency, this can considerably degrade your application's performance.
16 |
17 | You can batch multiple SQL statements in a single roundtrip:
18 |
19 | ```csharp
20 | var batch = new NpgsqlBatch(connection)
21 | {
22 | BatchCommands = { new("SELECT ..."), new("SELECT ...") }
23 | };
24 | await using (var reader = await batch.ExecuteReaderAsync())
25 | {
26 | while (await reader.ReadAsync()) {
27 | // Read first resultset
28 | }
29 |
30 | await reader.NextResultAsync();
31 |
32 | while (await reader.ReadAsync()) {
33 | // Read second resultset
34 | }
35 | }
36 | ```
37 |
38 | ## Disable enlisting to TransactionScope
39 |
40 | By default, Npgsql will enlist to ambient transactions. This occurs when a connection is opened while inside a `TransactionScope`, and can provide a powerful programming model for working with transactions. However, this involves checking whether an ambient transaction is in progress each time a (pooled) connection is opened, an operation that takes more time than you'd think. Scenarios where connections are very short-lived and open/close happens very frequently can benefit from removing this check - simply include `Enlist=false` in the connection string. Note that you can still enlist manually by calling `NpgsqlConnection.Enlist()`.
41 |
42 | ## Pooled Connection Reset
43 |
44 | When a pooled connection is closed, Npgsql will arrange for its state to be reset the next time it's used. This prevents leakage of state from one usage cycle of a physical connection to another one. For example, you may change certain PostgreSQL parameters (e.g. `statement_timeout`), and it's undesirable for this change to persist when the connection is closed.
45 |
46 | Connection reset happens via the PostgreSQL [`DISCARD ALL` command](https://www.postgresql.org/docs/current/static/sql-discard.html), or, if there are any prepared statements at the time of closing, by a combination of the equivalent statements described in the docs (to prevent closing those statements). Note that these statements aren't actually sent when closing the connection - they're written into Npgsql's internal write buffer, and will be sent with the first user statement after the connection is reopened. This prevents a costly database roundtrip.
47 |
48 | If you really want to squeeze every last bit of performance from PostgreSQL, you may disable connection reset by specifying `No Reset On Close` on your connection string - this will slightly improve performance in scenarios where connections are very short-lived, and especially if prepared statements are in use.
49 |
50 | ## Reading Large Values
51 |
52 | When reading results from PostgreSQL, Npgsql first reads raw binary data from the network into an internal read buffer, and then parses that data as you call methods such as `NpgsqlDataReader.GetString()`. While this allows for efficient network reads, it's worth thinking about the size of this buffer, which is 8K by default. Under normal usage, Npgsql attempts to read each row into the buffer; if that entire row fits in 8K, you'll have optimal performance. However, if a row is bigger than 8K, Npgsql will allocate an "oversize buffer", which will be used until the connection is closed or returned to the pool. If you're not careful, this can create significant memory churn that will slow down your application. To avoid this, if you know you're going to be reading 16k rows, you can specify `Read Buffer Size=18000` in your connection string (leaving some margin for protocol overhead); this will ensure that the read buffer is reused and no extra allocations occur.
53 |
54 | Another option is to pass `CommandBehavior.SequentialAccess` to `NpgsqlCommand.ExecuteReader()`. Sequential mode means that Npgsql will no longer read entire rows into its buffer, but will rather fill up the buffer as needed, reading more data only when it's empty. The same 8K read buffer will be used regardless of the row's total size, and Npgsql will take care of the details. In sequential mode, however, you must read the row's fields in the order in which you specified them; you cannot read the 2nd field and then go back to the 1st field, and trying to do so will generate an exception. Similarly, you cannot read the same field twice - once you've read a field, it has been consumed.
55 |
56 | For more information on `CommandBehavior.SequentialAccess`, see [this page](https://msdn.microsoft.com/en-us/library/87z0hy49(v=vs.110).aspx). If you decide to use this feature, be aware that it isn't used as often and may therefore contain bugs.
57 |
58 | You can also control the socket's receive buffer size (not to be confused with Npgsql's internal buffer) by setting the `Socket Receive Buffer Size` connection string parameter.
59 |
60 | ## Writing Large Values
61 |
62 | Writing is somewhat similar - Npgsql has an internal write buffer (also 8K by default). When writing your query's SQL and parameters to PostgreSQL, Npgsql always writes "sequentially", that is, filling up the 8K buffer and flushing it when full. You can use `Write Buffer Size` to control the buffer's size.
63 |
64 | You can also control the socket's send buffer size (not to be confused with Npgsql's internal buffer) by setting the `Socket Send Buffer Size` connection string parameter.
65 |
66 | ## Avoiding boxing when writing parameter values
67 |
68 | See [this section](basic-usage.md#strongly-typed-parameters).
69 |
70 | ## Unix Domain Socket
71 |
72 | If you're connecting to a PostgreSQL server on the same machine, you can boost performance a little by connecting via Unix domain socket rather than via a regular TCP/IP socket. To do this, simply specify the directory of your PostgreSQL sockets in the `Host` connection string parameter - if this parameter starts with a slash or drive letter, it will be taken to mean a filesystem path.
73 |
--------------------------------------------------------------------------------
/conceptual/Npgsql/release-notes/3.2.md:
--------------------------------------------------------------------------------
1 | # Npgsql 3.2 Release Notes
2 |
3 | Npgsql 3.2 is out and available on nuget.org. This is a major release with substantial internal changes and should be deployed with care. For critical applications it may be advisable to wait until 3.2.1 is out.
4 |
5 | This release contains a large number of new features, but the main focus is performance - some usage scenarios may show dramatic improvements. See below for more details.
6 |
7 | ## Major Changes
8 |
9 | * Prepared statements are now persistent (survive beyond pooled connection close/open), providing significant performance improvements for applications with short-lived connections, such as most webapps ([#483](https://github.com/npgsql/npgsql/issues/483)). Also, statements can optionally be prepared automatically by Npgsql based on use, unlocking prepared statement performance for O/RMs and data layers which don't prepare themselves, such as Dapper or Entity Framework Core ([#1237](https://github.com/npgsql/npgsql/issues/1237)). See [this blog post for more info](http://www.roji.org/prepared-statements-in-npgsql-3-2).
10 | * The internal I/O system has been overhauled to continue supporting sync and async I/O, but with a vastly better coding model. This should eliminate most protocol sync bugs, and make it much easier to maintain and write new type handlers ([#1326](https://github.com/npgsql/npgsql/issues/1326)).
11 | * Kerberos login ("integrated security") is now supported on Linux/Mac ([#1079](https://github.com/npgsql/npgsql/issues/1079)).
12 | * Support for System.Transactions and distributed transactions has been rewritten, and should have fewer problems than before ([#122](https://github.com/npgsql/npgsql/issues/122)).
13 | * Performance counters have been implemented, [similar to what SqlClient provides](https://msdn.microsoft.com/library/ms254503(v=vs.110).aspx). ([#619](https://github.com/npgsql/npgsql/issues/619)).
14 | * The Visual Studio integration extension (DDEX) has been rewritten for a much better installation experience, and includes some new features as well ([#1407](https://github.com/npgsql/npgsql/issues/1407)).
15 | * If your application attempts to make use of more than one connection at the same time, an "operation already in progress" exception was thrown. This exception now provides more information to help you track down the bug ([#1248](https://github.com/npgsql/npgsql/issues/1248)).
16 |
17 | Many other small changes have been made, especially with regards to performance. [Here's the full list](https://github.com/npgsql/npgsql/milestone/24?closed=1).
18 |
19 | ## Breaking Changes from 3.1
20 |
21 | * Connections can no longer be constructed with `NpgsqlConnectionStringBuilder` - only plain string connection strings are supported ([#1415](https://github.com/npgsql/npgsql/issues/1415)).
22 | * The `Buffer Size` connection string parameter has been replaced by `Read Buffer Size` and `Write Buffer Size`.
23 |
24 | ## Contributors
25 |
26 | Thank you very much to the following people who have contributed to the individual 3.2.x releases.
27 |
28 | ### [Milestone 3.2.7](https://github.com/npgsql/npgsql/issues?q=is%3Aissue+milestone%3A3.2.7)
29 |
30 | Contributor | Assigned issues
31 | ------------------------------------------------------ | ----------------:|
32 | [@roji](https://github.com/roji) | [4](https://github.com/npgsql/npgsql/issues?q=is%3Aissue+milestone%3A3.2.7+is%3Aclosed+assignee%3Aroji)
33 | [@erwaller](https://github.com/erwaller) | [1](https://github.com/npgsql/npgsql/issues?q=is%3Aissue+milestone%3A3.2.7+is%3Aclosed+assignee%3Aerwaller)
34 |
35 | ### [Milestone 3.2.6](https://github.com/npgsql/npgsql/issues?q=is%3Aissue+milestone%3A3.2.6)
36 |
37 | Contributor | Assigned issues
38 | ------------------------------------------------------ |-----------------:|
39 | [@roji](https://github.com/roji) | [8](https://github.com/npgsql/npgsql/issues?q=is%3Aissue+milestone%3A3.2.6+is%3Aclosed+assignee%3Aroji)
40 |
41 | ### [Milestone 3.2.5](https://github.com/npgsql/npgsql/issues?q=is%3Aissue+milestone%3A3.2.5)
42 |
43 | Contributor | Assigned issues
44 | ------------------------------------------------------ | ----------------:|
45 | [@roji](https://github.com/roji) | [4](https://github.com/npgsql/npgsql/issues?q=is%3Aissue+milestone%3A3.2.5+is%3Aclosed+assignee%3Aroji)
46 |
47 | ### [Milestone 3.2.4.1](https://github.com/npgsql/npgsql/issues?q=is%3Aissue+milestone%3A3.2.4.1)
48 |
49 | Contributor | Assigned issues
50 | ------------------------------------------------------ | ----------------:|
51 | [@roji](https://github.com/roji) | [1](https://github.com/npgsql/npgsql/issues?q=is%3Aissue+milestone%3A3.2.4.1+is%3Aclosed+assignee%3Aroji)
52 |
53 | ### [Milestone 3.2.4](https://github.com/npgsql/npgsql/issues?q=is%3Aissue+milestone%3A3.2.4)
54 |
55 | Contributor | Assigned issues
56 | ------------------------------------------------------ | ----------------:|
57 | [@roji](https://github.com/roji) | [3](https://github.com/npgsql/npgsql/issues?q=is%3Aissue+milestone%3A3.2.4+is%3Aclosed+assignee%3Aroji)
58 |
59 | ### [Milestone 3.2.3](https://github.com/npgsql/npgsql/issues?q=is%3Aissue+milestone%3A3.2.3)
60 |
61 | Contributor | Assigned issues
62 | ------------------------------------------------------ | ----------------:|
63 | [@roji](https://github.com/roji) | [9](https://github.com/npgsql/npgsql/issues?q=is%3Aissue+milestone%3A3.2.3+is%3Aclosed+assignee%3Aroji)
64 | [@funny-falcon](https://github.com/funny-falcon) | [1](https://github.com/npgsql/npgsql/issues?q=is%3Aissue+milestone%3A3.2.3+is%3Aclosed+assignee%3Afunny-falcon)
65 | [@jlareo](https://github.com/jlareo) | [1](https://github.com/npgsql/npgsql/issues?q=is%3Aissue+milestone%3A3.2.3+is%3Aclosed+assignee%3Ajlareo)
66 | [@odiernod](https://github.com/odiernod) | [1](https://github.com/npgsql/npgsql/issues?q=is%3Aissue+milestone%3A3.2.3+is%3Aclosed+assignee%3Aodiernod)
67 |
68 | ### [Milestone 3.2.2](https://github.com/npgsql/npgsql/issues?q=is%3Aissue+milestone%3A3.2.2)
69 |
70 | Contributor | Assigned issues
71 | ------------------------------------------------------ | ----------------:|
72 | [@roji](https://github.com/roji) | [11](https://github.com/npgsql/npgsql/issues?q=is%3Aissue+milestone%3A3.2.2+is%3Aclosed+assignee%3Aroji)
73 | [@kurtschelfthout](https://github.com/kurtschelfthout) | [2](https://github.com/npgsql/npgsql/issues?q=is%3Aissue+milestone%3A3.2.2+is%3Aclosed+assignee%3Akurtschelfthout)
74 | [@Emill](https://github.com/Emill) | [1](https://github.com/npgsql/npgsql/issues?q=is%3Aissue+milestone%3A3.2.2+is%3Aclosed+assignee%3AEmill)
75 |
76 | ### [Milestone 3.2.1](https://github.com/npgsql/npgsql/issues?q=is%3Aissue+milestone%3A3.2.1)
77 |
78 | Contributor | Assigned issues
79 | ------------------------------------------------------ | ----------------:|
80 | [@roji](https://github.com/roji) | [7](https://github.com/npgsql/npgsql/issues?q=is%3Aissue+milestone%3A3.2.1+is%3Aclosed+assignee%3Aroji)
81 |
82 | ### [Milestone 3.2](https://github.com/npgsql/npgsql/issues?q=is%3Aissue+milestone%3A3.2)
83 |
84 | Contributor | Assigned issues
85 | ------------------------------------------------------ | ---------------:|
86 | [@roji](https://github.com/roji) | [33](https://github.com/npgsql/npgsql/issues?q=is%3Aissue+milestone%3A3.2+is%3Aclosed+assignee%3Aroji)
87 |
--------------------------------------------------------------------------------
/conceptual/Npgsql/diagnostics/logging.md:
--------------------------------------------------------------------------------
1 | # Logging
2 |
3 | > [!NOTE]
4 | > Starting with version 7.0, Npgsql supports standard .NET logging via [Microsoft.Extensions.Logging](https://learn.microsoft.com/dotnet/core/extensions/logging). If you're using an earlier version of Npgsql, skip down to [this section](#old-logging).
5 |
6 | Npgsql fully supports logging various events via the standard .NET [Microsoft.Extensions.Logging](https://learn.microsoft.com/dotnet/core/extensions/logging) package. These can help debug issues and understand what's going on as your application interacts with PostgreSQL.
7 |
8 | ## Console programs
9 |
10 | To set up logging in Npgsql, create your `ILoggerFactory` as usual, and then configure an `NpgsqlDataSource` with it. Any use of connections handed out by the data source will log via your provided logger factory.
11 |
12 | The following shows a minimal console application logging to the console via [Microsoft.Extensions.Logging.Console](https://www.nuget.org/packages/Microsoft.Extensions.Logging.Console):
13 |
14 | ```csharp
15 | // Create a Microsoft.Extensions.Logging LoggerFactory, configuring it with the providers,
16 | // log levels and other desired configuration.
17 | var loggerFactory = LoggerFactory.Create(builder => builder.AddConsole());
18 |
19 | // Create an NpgsqlDataSourceBuilder, configuring it with our LoggerFactory
20 | var dataSourceBuilder = new NpgsqlDataSourceBuilder("Host=localhost;Username=test;Password=test");
21 | dataSourceBuilder.UseLoggerFactory(loggerFactory);
22 | await using var dataSource = dataSourceBuilder.Build();
23 |
24 | // Any connections handed out by the data source will log via the LoggerFactory:
25 | await using var connection = await dataSource.OpenConnectionAsync();
26 | await using var command = new NpgsqlCommand("SELECT 1", connection);
27 | _ = await command.ExecuteScalarAsync();
28 | ```
29 |
30 | Running this program outputs the following to the console:
31 |
32 | ```console
33 | info: Npgsql.Command[2001]
34 | Command execution completed (duration=16ms): SELECT 1
35 | ```
36 |
37 | By default, Npgsql logs command executions at the `Information` log level, as well as various warnings and errors. To see more detailed logging, increase the log level to `Debug` or `Trace`.
38 |
39 | ## ASP.NET and dependency injection
40 |
41 | If you're using ASP.NET, you can use the additional [Npgsql.DependencyInjection](https://www.nuget.org/packages/Npgsql.DependencyInjection) package, which provides seamless integration with dependency injection and logging:
42 |
43 | ```csharp
44 | var builder = WebApplication.CreateBuilder(args);
45 | builder.Logging.AddConsole();
46 | builder.Services.AddNpgsqlDataSource("Host=localhost;Username=test;Password=test");
47 | ```
48 |
49 | The `AddNpgsqlDataSource` arranges for a data source to be configured in the DI container, which automatically uses the logger factory configured via the standard ASP.NET means. This allows your endpoints to get injected with Npgsql connections which log to the same logger factory when used.
50 |
51 | ## Configuration without NpgsqlDataSource
52 |
53 | If your application doesn't use `NpgsqlDataSource`, you can still configure Npgsql's logger factory globally, as follows:
54 |
55 | ```csharp
56 | var loggerFactory = LoggerFactory.Create(builder => builder.AddConsole());
57 | NpgsqlLoggingConfiguration.InitializeLogging(loggerFactory);
58 |
59 | await using var conn = new NpgsqlConnection("Host=localhost;Username=test;Password=test");
60 | conn.Execute("SELECT 1");
61 | ```
62 |
63 | Note that you must call `InitializeLogging` at the start of your program, before any other Npgsql API is used.
64 |
65 | ## Parameter logging
66 |
67 | By default, when logging SQL statements, Npgsql does not log parameter values, since these may contain sensitive information. You can opt into parameter logging when debugging your application:
68 |
69 | ### [Console Program](#tab/console)
70 |
71 | ```csharp
72 | dataSourceBuilder.EnableParameterLogging();
73 | ```
74 |
75 | ### [ASP.NET Program](#tab/aspnet)
76 |
77 | ```csharp
78 | builder.Services.AddNpgsqlDataSource(
79 | "Host=localhost;Username=test;Password=test",
80 | builder => builder.EnableParameterLogging());
81 | ```
82 |
83 | ### [Without DbDataSource](#tab/without-dbdatasource)
84 |
85 | ```csharp
86 | NpgsqlLoggingConfiguration.InitializeLogging(loggerFactory, parameterLoggingEnabled: true);
87 | ```
88 |
89 | ***
90 |
91 | > [!WARNING]
92 | > Do not leave parameter logging enabled in production, as sensitive user information may leak into your logs.
93 |
94 | ## Logging in older versions of Npgsql
95 |
96 | Prior to 7.0, Npgsql had its own, custom logging API. To use this, statically inject a logging provider implementing the `INpgsqlLoggingProvider` interface as follows:
97 |
98 | ```csharp
99 | NpgsqlLogManager.Provider = new ???
100 | ```
101 |
102 | *Note: you must set the logging provider before invoking any other Npgsql method, at the very start of your program.*
103 |
104 | It's trivial to create a logging provider that passes log messages to whatever logging framework you use; you can find such an adapter for NLog below.
105 |
106 | ### ConsoleLoggingProvider
107 |
108 | Npgsql comes with one built-in logging provider: `ConsoleLoggingProvider`. It simply dumps all log messages with a given level or above to standard output.
109 | You can set it up by including the following line at the beginning of your application:
110 |
111 | ```csharp
112 | NpgsqlLogManager.Provider = new ConsoleLoggingProvider(<min level>, <print level?>, <print connector id?>);
113 | ```
114 |
115 | Level defaults to `NpgsqlLogLevel.Info` (which will only print warnings and errors).
116 | You can also have log levels and connector IDs logged.
117 |
118 | ### NLogLoggingProvider (or implementing your own)
119 |
120 | The following provider is used in the Npgsql unit tests to pass log messages to [NLog](http://nlog-project.org/).
121 | You're welcome to copy-paste it into your project, or to use it as a starting point for implementing your own custom provider.
122 |
123 | ```csharp
124 | class NLogLoggingProvider : INpgsqlLoggingProvider
125 | {
126 | public NpgsqlLogger CreateLogger(string name)
127 | {
128 | return new NLogLogger(name);
129 | }
130 | }
131 |
132 | class NLogLogger : NpgsqlLogger
133 | {
134 | readonly Logger _log;
135 |
136 | internal NLogLogger(string name)
137 | {
138 | _log = LogManager.GetLogger(name);
139 | }
140 |
141 | public override bool IsEnabled(NpgsqlLogLevel level)
142 | {
143 | return _log.IsEnabled(ToNLogLogLevel(level));
144 | }
145 |
146 | public override void Log(NpgsqlLogLevel level, int connectorId, string msg, Exception exception = null)
147 | {
148 | var ev = new LogEventInfo(ToNLogLogLevel(level), "", msg);
149 | if (exception != null)
150 | ev.Exception = exception;
151 | if (connectorId != 0)
152 | ev.Properties["ConnectorId"] = connectorId;
153 | _log.Log(ev);
154 | }
155 |
156 | static LogLevel ToNLogLogLevel(NpgsqlLogLevel level)
157 | {
158 | switch (level)
159 | {
160 | case NpgsqlLogLevel.Trace:
161 | return LogLevel.Trace;
162 | case NpgsqlLogLevel.Debug:
163 | return LogLevel.Debug;
164 | case NpgsqlLogLevel.Info:
165 | return LogLevel.Info;
166 | case NpgsqlLogLevel.Warn:
167 | return LogLevel.Warn;
168 | case NpgsqlLogLevel.Error:
169 | return LogLevel.Error;
170 | case NpgsqlLogLevel.Fatal:
171 | return LogLevel.Fatal;
172 | default:
173 | throw new ArgumentOutOfRangeException("level");
174 | }
175 | }
176 | }
177 | ```
178 |
--------------------------------------------------------------------------------
/conceptual/Npgsql/replication.md:
--------------------------------------------------------------------------------
1 | # Logical and Physical Replication
2 |
3 | Replication allows a client to receive a continuous stream of updates from a PostgreSQL database, providing a near-realtime view of all changes as they occur. While this feature was originally developed to keep PostgreSQL standby replicas in sync with a primary, it can be used by arbitrary client applications.
4 |
5 | Replication can be used anywhere where a constant change feed of database changes is required; for example, an external application can be notified in near-realtime of any changes that occurred in a particular database table. This can be useful for external auditing purposes, for replicating certain data somewhere else, for implementing the outbox pattern (see [Additional resources](#additional-resources) below), and various other usages.
6 |
7 | Npgsql provides a 1st-class API for writing .NET replication clients, detailed below. While PostgreSQL supports both logical and physical replication, in the majority of cases .NET applications will want to use logical replication.
8 |
9 | ## Logical replication
10 |
11 | Logical replication is a means to stream messages generated by PostgreSQL [logical decoding](https://www.postgresql.org/docs/current/logicaldecoding.html) plugins to a client.
12 | The default implementation that is used by PostgreSQL itself to perform logical server to server replication is the Logical Streaming Replication Protocol which uses the `pgoutput` plugin, but PostgreSQL supports streaming messages generated by other plugins too and Npgsql supports receiving those.
13 |
14 | ### General setup
15 |
16 | To set up logical replication, follow the [quick setup instructions](https://www.postgresql.org/docs/current/logical-replication-quick-setup.html) in the PostgreSQL docs (note that a SUBSCRIPTION isn't required since the client isn't PostgreSQL):
17 |
18 | Enable logical replication in your `postgresql.conf` file:
19 |
20 | ```ini
21 | wal_level = logical
22 | ```
23 |
24 | Set up a replication user in your [`pg_hba.conf`](https://www.postgresql.org/docs/current/auth-pg-hba-conf.html) file:
25 |
26 | ```text
27 | host replication repuser 0.0.0.0/0 md5
28 | ```
29 |
30 | The user `repuser` must exist in your cluster and either be a superuser or have the replication attribute set. See [CREATE ROLE docs](https://www.postgresql.org/docs/current/sql-createrole.html).
31 |
32 | ### Logical Streaming Replication Protocol (pgoutput plugin)
33 |
34 | The modern, recommended way to perform logical replication was introduced in PostgreSQL 10 - [see the PostgreSQL documentation](https://www.postgresql.org/docs/current/logical-replication.html). This method, using the built-in pgoutput replication plugin, streams efficient, binary messages to represent database updates such as INSERT, UPDATE and DELETE ([see the full list](https://www.postgresql.org/docs/current/protocol-logicalrep-message-formats.html)); Npgsql exposes these messages as an `IAsyncEnumerable` which can easily be enumerated and consumed.
35 |
36 | [Create a publication](https://www.postgresql.org/docs/current/sql-createpublication.html), which defines the group of tables in the database you wish to replicate:
37 |
38 | ```sql
39 | CREATE PUBLICATION blog_pub FOR TABLE blogs;
40 | ```
41 |
42 | Create a replication slot, which will hold the state of the replication stream:
43 |
44 | ```sql
45 | SELECT * FROM pg_create_logical_replication_slot('blog_slot', 'pgoutput');
46 | ```
47 |
48 | If your application goes down, the slot persistently records the last data streamed to it, and allows resuming the application at the point where it left off.
49 |
50 | At this point, everything is ready to start replicating! Create this simple .NET program with Npgsql:
51 |
52 | ```csharp
53 | await using var conn = new LogicalReplicationConnection("");
54 | await conn.Open();
55 |
56 | var slot = new PgOutputReplicationSlot("blog_slot");
57 |
58 | // The following will loop until the cancellation token is triggered, and will print message types coming from PostgreSQL:
59 | var cancellationTokenSource = new CancellationTokenSource();
60 | await foreach (var message in conn.StartReplication(
61 | slot, new PgOutputReplicationOptions("blog_pub", 1), cancellationTokenSource.Token))
62 | {
63 | Console.WriteLine($"Received message type: {message.GetType().Name}");
64 |
65 | // Always call SetReplicationStatus() or assign LastAppliedLsn and LastFlushedLsn individually
66 | // so that Npgsql can inform the server which WAL files can be removed/recycled.
67 | conn.SetReplicationStatus(message.WalEnd);
68 | }
69 | ```
70 |
71 | For example, if you insert a new row into your `blogs` table, you should see the following output:
72 |
73 | ```output
74 | Received message type: BeginMessage
75 | Received message type: RelationMessage
76 | Received message type: InsertMessage
77 | Received message type: CommitMessage
78 | ```
79 |
80 | > [!WARNING]
81 | > Npgsql internally recycles the message instances it hands out. It is an error to use a message received from `StartReplication` once the next message has been read.
82 |
83 | The above was just a minimal "getting started" guide for logical replication - many additional configuration options and modes exist as well. Consult the PostgreSQL documentation for more details.
84 |
85 | ### Test decoding (test_decoding plugin)
86 |
87 | An additional logical replication plugin which Npgsql supports is `test_decoding`. This plugin outputs textual representations of events, which are less efficient and need to be parsed; it is meant for testing that replication works rather than for building robust production apps. However, it can still be useful in some scenarios, especially in older PostgreSQL versions where `pgoutput` wasn't yet introduced.
88 |
89 | To use `test_decoding`, first create a logical replication slot with `test_decoding` as the plugin type.
90 |
91 | ```sql
92 | SELECT * FROM pg_create_logical_replication_slot('blog_slot', 'test_decoding');
93 | ```
94 |
95 | After that use the following:
96 |
97 | ```csharp
98 | await using var conn = new LogicalReplicationConnection("Host=localhost;Username=test;Password=test");
99 | await conn.Open();
100 |
101 | var slot = new TestDecodingReplicationSlot("blog_slot");
102 |
103 | // The following will loop until the cancellation token is triggered, and will print message types coming from PostgreSQL:
104 | var cancellationTokenSource = new CancellationTokenSource();
105 | await foreach (var message in conn.StartReplication(slot, cancellationTokenSource.Token))
106 | {
107 | Console.WriteLine($"Message: {message.Data}");
108 |
109 | // Always call SetReplicationStatus() or assign LastAppliedLsn and LastFlushedLsn individually
110 | // so that Npgsql can inform the server which WAL files can be removed/recycled.
111 | conn.SetReplicationStatus(message.WalEnd);
112 | }
113 | ```
114 |
115 | Inserting a row will produce the following string messages:
116 |
117 | ```output
118 | Message: BEGIN 230413
119 | Message: table public.blogs: INSERT: id[integer]:2 name[text]:'blog1'
120 | Message: COMMIT 230413
121 | ```
122 |
123 | > [!WARNING]
124 | > Npgsql internally recycles the message instances it hands out. It is an error to use a message received from `StartReplication` once the next message has been read.
125 |
126 | ## Physical replication
127 |
128 | Finally, PostgreSQL also supports physical replication, which streams raw block data rather than logical events on changes. While useful for synchronizing PostgreSQL replicas and supported by Npgsql, this mode is unlikely to be useful for a typical .NET program client.
129 |
130 | ## Additional resources
131 |
132 | * See [here](https://event-driven.io/en/push_based_outbox_pattern_with_postgres_logical_replication/) for a great post on implementing the outbox pattern via PostgreSQL logical replication. The outbox pattern guarantees delivery of an event from the database to e.g. a queue.
133 |
--------------------------------------------------------------------------------
/conceptual/EFCore.PG/mapping/range.md:
--------------------------------------------------------------------------------
1 | # Ranges and Multiranges
2 |
3 | PostgreSQL has the unique feature of supporting [*range data types*](https://www.postgresql.org/docs/current/static/rangetypes.html). Ranges represent a range of numbers, dates or other data types, and allow you to easily query ranges which contain a value, perform set operations (e.g. query ranges which contain other ranges), and other similar operations. The range operations supported by PostgreSQL are listed [in this page](https://www.postgresql.org/docs/current/static/functions-range.html). The Npgsql EF Core provider allows you to seamlessly map PostgreSQL ranges, and even perform operations on them that get translated to SQL for server evaluation.
4 |
5 | In addition, PostgreSQL 14 introduced *multiranges*, which are basically sorted arrays of non-overlapping ranges with set-theoretic operations defined over them. Most range operators also work on multiranges, and they have a few functions of their own.
6 |
7 | ## Ranges
8 |
9 | Npgsql maps PostgreSQL ranges to the generic CLR type `NpgsqlRange<T>`:
10 |
11 | ```csharp
12 | public class Event
13 | {
14 | public int Id { get; set; }
15 | public string Name { get; set; }
16 |     public NpgsqlRange<DateTime> Duration { get; set; }
17 | }
18 | ```
19 |
20 | This will create a column of type `daterange` in your database. You can similarly have properties of type `NpgsqlRange<int>`, `NpgsqlRange<decimal>`, etc.
21 |
22 | ## User-defined ranges
23 |
24 | PostgreSQL comes with 6 built-in ranges: `int4range`, `int8range`, `numrange`, `tsrange`, `tstzrange`, `daterange`; these can be used simply by adding the appropriate `NpgsqlRange<T>` property in your entities as shown above. You can also define your own range types over arbitrary types, and use those in EF Core as well.
25 |
26 | To make the EF Core type mapper aware of your user-defined range, call the `MapRange()` method in your context's `OnConfiguring()` method as follows:
27 |
28 | ```csharp
29 | protected override void OnConfiguring(DbContextOptionsBuilder optionsBuilder)
30 | => optionsBuilder.UseNpgsql(
31 | "",
32 | options => options.MapRange("floatrange"));
33 | ```
34 |
35 | This allows you to have properties of type `NpgsqlRange<float>`, which will be mapped to PostgreSQL `floatrange`.
36 |
37 | The above does *not* create the `floatrange` type for you. In order to do that, include the following in your context's `OnModelCreating()`:
38 |
39 | ```csharp
40 | protected override void OnModelCreating(ModelBuilder modelBuilder)
41 | => modelBuilder.HasPostgresRange("floatrange", "real");
42 | ```
43 |
44 | This will cause the appropriate [`CREATE TYPE ... AS RANGE`](https://www.postgresql.org/docs/current/static/sql-createtype.html) statement to be generated in your migrations, ensuring that your range is created and ready for use. Note that `HasPostgresRange()` supports additional parameters as supported by PostgreSQL `CREATE TYPE`.
45 |
46 | ## Multiranges
47 |
48 | Npgsql maps arrays or lists of `NpgsqlRange` to PostgreSQL multiranges:
49 |
50 | ```csharp
51 | public class Store
52 | {
53 | public int Id { get; set; }
54 | public string Name { get; set; }
55 | public NpgsqlRange[] OpeningTimes { get; set; }
56 | }
57 | ```
58 |
59 | ## Operation translation
60 |
61 | Ranges can be queried via extension methods on `NpgsqlRange<T>`:
62 |
63 | ```csharp
64 | var events = context.Events.Where(p => p.Duration.Contains(someDate));
65 | ```
66 |
67 | This will translate to an SQL operation using the PostgreSQL `@>` operator, evaluating at the server and saving you from transferring the entire `Events` table to the client. Note that you can (and probably should) create indexes to make this operation more efficient, see the PostgreSQL docs for more info.
68 |
69 | The following table lists the range operations that currently get translated. Most operations work on both ranges and multiranges (starting with version 6.0.0); the multirange version is omitted for brevity.
70 |
71 | If you run into a missing operation, please open an issue.
72 |
73 | .NET | SQL
74 | --------------------------------------|-----
75 | range.LowerBound | [lower(range)](https://www.postgresql.org/docs/current/functions-range.html#RANGE-FUNCTIONS-TABLE)
76 | range.UpperBound | [upper(range)](https://www.postgresql.org/docs/current/functions-range.html#RANGE-FUNCTIONS-TABLE)
77 | range.LowerBoundIsInclusive | [lower_inc(range)](https://www.postgresql.org/docs/current/functions-range.html#RANGE-FUNCTIONS-TABLE)
78 | range.UpperBoundIsInclusive | [upper_inc(range)](https://www.postgresql.org/docs/current/functions-range.html#RANGE-FUNCTIONS-TABLE)
79 | range.LowerBoundIsInfinite | [lower_inf(range)](https://www.postgresql.org/docs/current/functions-range.html#RANGE-FUNCTIONS-TABLE)
80 | range.UpperBoundIsInfinite | [upper_inf(range)](https://www.postgresql.org/docs/current/functions-range.html#RANGE-FUNCTIONS-TABLE)
81 | range.IsEmpty | [isempty(range)](https://www.postgresql.org/docs/current/functions-range.html#RANGE-FUNCTIONS-TABLE)
82 | multirange.Any()                      | [NOT isempty(multirange)](https://www.postgresql.org/docs/current/functions-range.html#MULTIRANGE-FUNCTIONS-TABLE)
83 | range.Contains(i) | [range @> i](https://www.postgresql.org/docs/current/static/functions-range.html#RANGE-OPERATORS-TABLE)
84 | range1.Contains(range2)               | [range1 @> range2](https://www.postgresql.org/docs/current/static/functions-range.html#RANGE-OPERATORS-TABLE)
85 | range1.ContainedBy(range2) | [range1 <@ range2](https://www.postgresql.org/docs/current/static/functions-range.html#RANGE-OPERATORS-TABLE)
86 | range1.Overlaps(range2) | [range1 && range2](https://www.postgresql.org/docs/current/static/functions-range.html#RANGE-OPERATORS-TABLE)
87 | range1.IsStrictlyLeftOf(range2) | [range1 << range2](https://www.postgresql.org/docs/current/static/functions-range.html#RANGE-OPERATORS-TABLE)
88 | range1.IsStrictlyRightOf(range2) | [range1 >> range2](https://www.postgresql.org/docs/current/static/functions-range.html#RANGE-OPERATORS-TABLE)
89 | range1.DoesNotExtendLeftOf(range2) | [range1 &> range2](https://www.postgresql.org/docs/current/static/functions-range.html#RANGE-OPERATORS-TABLE)
90 | range1.DoesNotExtendRightOf(range2) | [range1 <& range2](https://www.postgresql.org/docs/current/static/functions-range.html#RANGE-OPERATORS-TABLE)
91 | range1.IsAdjacentTo(range2) | [range1 -\|- range2](https://www.postgresql.org/docs/current/static/functions-range.html#RANGE-OPERATORS-TABLE)
92 | range1.Union(range2) | [range1 + range2](https://www.postgresql.org/docs/current/static/functions-range.html#RANGE-OPERATORS-TABLE)
93 | range1.Intersect(range2) | [range1 * range2](https://www.postgresql.org/docs/current/static/functions-range.html#RANGE-OPERATORS-TABLE)
94 | range1.Except(range2) | [range1 - range2](https://www.postgresql.org/docs/current/static/functions-range.html#RANGE-OPERATORS-TABLE)
95 | range1.Merge(range2) | [range_merge(range1, range2)](https://www.postgresql.org/docs/current/functions-range.html#RANGE-FUNCTIONS-TABLE)
96 | multirange.Merge() | [range_merge(multirange)](https://www.postgresql.org/docs/current/functions-range.html#MULTIRANGE-FUNCTIONS-TABLE)
97 | ranges.RangeAgg() | [range_agg(ranges)](https://www.postgresql.org/docs/current/functions-aggregate.html)
98 | ranges.RangeIntersectAgg() | [range_intersect_agg(ranges)](https://www.postgresql.org/docs/current/functions-aggregate.html)
99 | multiranges.RangeIntersectAgg()       | [range_intersect_agg(multiranges)](https://www.postgresql.org/docs/current/functions-aggregate.html); see [Aggregate functions](translations.md#aggregate-functions)
100 |
--------------------------------------------------------------------------------
/conceptual/EFCore.PG/release-notes/9.0.md:
--------------------------------------------------------------------------------
1 | # 9.0 Release Notes
2 |
3 | Npgsql.EntityFrameworkCore.PostgreSQL version 9.0 is out and available on [nuget.org](https://www.nuget.org/packages/Npgsql.EntityFrameworkCore.PostgreSQL).
4 |
5 | ## Improved, unified configuration experience
6 |
7 | The Npgsql EF provider is built on top of the lower-level Npgsql ADO.NET provider; the configuration interface between these two layers was less than ideal, and configuration has been more difficult than it should have been. For version 9.0, the configuration experience has been considerably improved.
8 |
9 | Since version 7, the Npgsql ADO.NET provider has been moving to [NpgsqlDataSource](../../Npgsql/basic-usage.md#data-source) as the preferred way of configuring connections and obtaining them. At the EF level, it has been possible to pass an NpgsqlDataSource instance to `UseNpgsql()`; but this required that the user separately configure a data source and manage it. In addition, features such as plugins and enums require support from both the EF and ADO.NET layers, forcing users to perform multiple setup actions at the different layers.
10 |
11 | With version 9, `UseNpgsql()` becomes a single point for configuration, for both the EF and ADO.NET levels. EF can now internally set up an NpgsqlDataSource, automatically applying all the necessary configuration to it, and also exposes an API to allow users to apply arbitrary configuration to it as well:
12 |
13 | ```csharp
14 | builder.Services.AddDbContextPool<BloggingContext>(opt =>
15 | opt.UseNpgsql(
16 | builder.Configuration.GetConnectionString("BloggingContext"),
17 | o => o
18 | .SetPostgresVersion(13, 0)
19 | .UseNodaTime()
20 | .MapEnum("mood")
21 | .ConfigureDataSource(dataSourceBuilder => dataSourceBuilder.UseClientCertificate(certificate))));
22 | ```
23 |
24 | In the above code, the following configuration gestures are performed:
25 |
26 | 1. `SetPostgresVersion()` is an EF-only option to produce SQL for PostgreSQL version 13 (avoiding newer incompatible features)
27 | 2. `UseNodaTime()` adds a plugin allowing use of NodaTime for date/time type mapping. This also requires an ADO.NET NodaTime plugin, which previously needed to be configured separately but is now set up automatically.
28 | 3. `MapEnum()` maps a .NET enum type. Like `UseNodaTime()`, this also used to require a separate ADO.NET configuration gesture, but is now done automatically. As an added bonus, doing this now also adds the enum to the model, causing the enum to be created in the database via EF's migrations.
29 | 4. `ConfigureDataSource()` exposes an NpgsqlDataSourceBuilder, which you can use to configure arbitrary ADO.NET options. In this example, the certificate is defined for the TLS authentication process.
30 |
31 | For more information, see the [getting started docs](../index.md).
32 |
33 | ### Improved configuration for enums and plugins
34 |
35 | Previously, configuration around enums and plugins (NodaTime, NetTopologySuite) was complicated, requiring multiple setup actions at both the EF and the lower-level Npgsql layers. EF 9.0 improves the configuration story, allowing you to configure enums and plugins via a single EF gesture:
36 |
37 | ```csharp
38 | builder.Services.AddPooledDbContext(options => options.UseNpgsql(
39 | "",
40 | o => o.MapEnum("mood")));
41 | ```
42 |
43 | This takes care of everything - EF configuration, lower-level Npgsql configuration and even the addition of the enum to the EF model, which ensures that the enum is created in the database in EF migrations.
44 |
45 | See the [enum](../mapping/enum.md), [NodaTime](../mapping/nodatime.md) and [NetTopologySuite](../mapping/nts.md) documentation for more details.
46 |
47 | ## UUIDv7 GUIDs are generated by default
48 |
49 | When your entity types have a `Guid` key, EF Core by default generates key values for new entities client-side - in .NET - before inserting those entity types to the database; this can be better for performance in some situations. Before version 9.0, the provider generated random GUIDs (version 4) by calling the .NET [`Guid.NewGuid()`](https://learn.microsoft.com/en-us/dotnet/api/system.guid.newguid?view=net-8.0#system-guid-newguid) function. Unfortunately, random GUIDs aren't ideal for database indexing and can cause performance issues.
50 |
51 | Version 9.0 of the provider now generates the recently standardized version 7 GUIDs, which is a sequential GUID type that's more appropriate for database indexes and improves their performance. This new behavior is on by default and takes effect simply by upgrading the provider version.
52 |
53 | See [this post](https://www.cybertec-postgresql.com/en/unexpected-downsides-of-uuid-keys-in-postgresql) for more details and performance numbers on random vs. sequential GUIDs.
54 |
55 | Thanks to [@ChrisJollyAU](https://github.com/ChrisJollyAU) and [@Timovzl](https://github.com/Timovzl) for contributing this improvement!
56 |
57 | ## Other new features
58 |
59 | * Translate `array.Where(i => i != x)` to `array_remove(array, x)`
60 | * Translate `DateOnly.DayNumber`, `DateOnly.FromDayNumber()` and simplify `dateOnly1.DayNumber - dateOnly2.DayNumber` to `dateOnly1 - dateOnly2`.
61 | * Map the PostgreSQL `jsonpath` type to .NET string; this allows mapping to `jsonpath` columns.
62 |
63 | See the [9.0.0 milestone](https://github.com/npgsql/efcore.pg/milestone/61?closed=1) for the full list of Npgsql EF provider issues.
64 |
65 | ## Breaking changes
66 |
67 | ### Enum mappings must now be configured at the EF level
68 |
69 | Previously, enum configuration involved mapping the enum at the lower-level Npgsql layer (either via `NpgsqlDataSourceBuilder.MapEnum` or via `NpgsqlConnection.GlobalTypeMapper.MapEnum`); the EF provider automatically picked this configuration up for the EF-level setup. Unfortunately, this design created numerous issues and bugs.
70 |
71 | As part of the improved enum configuration story in version 9.0 ([see above](#improved-configuration-for-enums-and-plugins)), enums must now be configured at the EF level; although this is a breaking change for existing applications, it usually results in simplified setup code and fixes various bugs and problematic behavior.
72 |
73 | If your application calls `UseNpgsql` with a simple connection string (rather than an NpgsqlDataSource), it simply needs to add a `MapEnum` call there:
74 |
75 | ```csharp
76 | builder.Services.AddDbContext(options => options.UseNpgsql(
77 | "",
78 | o => o.MapEnum("mood")));
79 | ```
80 |
81 | All other setup code - the `MapEnum` call on `NpgsqlConnection.GlobalTypeMapper` and the `HasPostgresEnum` call in `OnModelCreating` - can be removed.
82 |
83 | If your application passes an NpgsqlDataSource to `UseNpgsql`, it also needs to add the `MapEnum` call as above; but the `MapEnum` call on `NpgsqlDataSourceBuilder` must also be kept.
84 |
85 | See the [enum documentation](../mapping/enum.md) for more information.
86 |
87 | ## Contributors
88 |
89 | A big thank you to all the following people who contributed to the 9.0 release!
90 |
91 | ### [Milestone 9.0.0](https://github.com/npgsql/efcore.pg/milestone/61?closed=1)
92 |
93 | Contributor | Assigned issues
94 | ------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------
95 | [@roji](https://github.com/roji) | [28](https://github.com/Npgsql/efcore.pg/issues?q=is%3Aissue+milestone%3A9.0.0+is%3Aclosed+assignee%3Aroji)
96 | [@ChrisJollyAU](https://github.com/ChrisJollyAU) | [2](https://github.com/Npgsql/efcore.pg/issues?q=is%3Aissue+milestone%3A9.0.0+is%3Aclosed+assignee%3AChrisJollyAU)
97 | [@Timovzl](https://github.com/Timovzl) | [1](https://github.com/Npgsql/efcore.pg/issues?q=is%3Aissue+milestone%3A9.0.0+is%3Aclosed+assignee%3ATimovzl)
98 | [@WhatzGames](https://github.com/WhatzGames) | [1](https://github.com/Npgsql/efcore.pg/issues?q=is%3Aissue+milestone%3A9.0.0+is%3Aclosed+assignee%3AWhatzGames)
99 |
--------------------------------------------------------------------------------
/conceptual/EFCore.PG/release-notes/10.0.md:
--------------------------------------------------------------------------------
1 | # 10.0 Release Notes
2 |
3 | Npgsql.EntityFrameworkCore.PostgreSQL version 10.0 is now in development; preview versions are available on [nuget.org](https://www.nuget.org/packages/Npgsql.EntityFrameworkCore.PostgreSQL).
4 |
5 | ## Full support for EF 10 JSON complex types
6 |
7 | EF 10 introduced support for mapping .NET types as JSON complex types, resolving several issues that existed with the previous JSON mapping via owned entities ([see release notes for more information](https://learn.microsoft.com/en-us/ef/core/what-is-new/ef-core-10.0/whatsnew#json)). The PG provider provides full support for this feature as well:
8 |
9 | ```c#
10 | modelBuilder.Entity(b =>
11 | {
12 | b.ComplexProperty(c => c.ShippingAddress, c => c.ToJson());
13 | b.ComplexProperty(c => c.BillingAddress, c => c.ToJson());
14 | });
15 | ```
16 |
17 | This configuration causes the following table to be created for your customers:
18 |
19 | ```sql
20 | CREATE TABLE "Customers" (
21 | "Id" integer GENERATED BY DEFAULT AS IDENTITY,
22 | "Name" text,
23 | "BillingAddress" jsonb NOT NULL,
24 | "ShippingAddress" jsonb NOT NULL,
25 | CONSTRAINT "PK_Customers" PRIMARY KEY ("Id")
26 | );
27 | ```
28 |
29 | This is now the preferred way to perform strongly-typed JSON mapping of arbitrary .NET types, and replaces owned entities and [legacy POCO mapping](../mapping/json.md?#legacy-poco-mapping-deprecated).
30 |
31 | The provider now also supports performing partial updates within JSON documents using `ExecuteUpdate`. For example, the following efficiently overwrites all Customers' shipping address streets with their billing address streets:
32 |
33 | ```c#
34 | await context.Customers.ExecuteUpdateAsync(s =>
35 | s.SetProperty(b => b.ShippingAddress.Street, b => b.BillingAddress.Street));
36 | ```
37 |
38 | This produces the following SQL:
39 |
40 | ```sql
41 | UPDATE "Customers" AS c
42 | SET "ShippingAddress" = jsonb_set(c."ShippingAddress", '{Street}', c."BillingAddress" -> 'Street')
43 | ```
44 |
45 | ## Better support for JSON scalar (primitive) collections
46 |
47 | In most relational databases, scalar collections are mapped to a JSON column, and the collection is serialized to a JSON array in the database. PostgreSQL, however, is unique in providing a 1st-class array type, so the EF provider maps scalar collections to arrays instead. For example, given the following type:
48 |
49 | ```c#
50 | public class Customer
51 | {
52 | public int Id { get; set; }
53 | public string[] Tags { get; set; }
54 | }
55 | ```
56 |
57 | ... the PostgreSQL provider will create the following table (note the `text[]` array column):
58 |
59 | ```sql
60 | CREATE TABLE "Customers" (
61 | "Id" integer GENERATED BY DEFAULT AS IDENTITY,
62 | "Tags" text[] NOT NULL,
63 | CONSTRAINT "PK_Customers" PRIMARY KEY ("Id")
64 | );
65 | ```
66 |
67 | However, when scalar collections are nested within a JSON document, they must be mapped to JSON arrays, as in other databases:
68 |
69 | ```c#
70 | public class Customer
71 | {
72 | public int Id { get; set; }
73 | public Address Address { get; set; }
74 | }
75 |
76 | public class Address
77 | {
78 | // ...
79 |
80 | public string[] Tags { get; set; }
81 | }
82 | ```
83 |
84 | Version 10 of the provider now produces much better SQL when querying such nested scalar collections. For example, when querying using Contains:
85 |
86 | ```c#
87 | var customers = await context.Customers.Where(b => b.ShippingAddress.Tags.Contains("foo")).ToListAsync();
88 | ```
89 |
90 | ... previous versions of the provider generated the following complicated (and inefficient) SQL:
91 |
92 | ```sql
93 | SELECT c."Id", c."Name", c."ShippingAddress"
94 | FROM "Customers" AS c
95 | WHERE 'foo' = ANY ((ARRAY(SELECT CAST(element AS text) FROM jsonb_array_elements_text(c."ShippingAddress" -> 'Tags') WITH ORDINALITY AS t(element) ORDER BY ordinality)))
96 | ```
97 |
98 | Version 10, in contrast, produces the following cleaner SQL, which can also benefit from indexes:
99 |
100 | ```sql
101 | SELECT c."Id", c."Name", c."ShippingAddress"
102 | FROM "Customers" AS c
103 | WHERE (c."ShippingAddress" -> 'Tags') @> to_jsonb('foo'::text)
104 | ```
105 |
106 | Finally, version 10 of the provider also allows you to map a non-nested scalar collection to a JSON column, instead of to an array column, and provides full querying capabilities:
107 |
108 | ```c#
109 | public class Customer
110 | {
111 | // ...
112 |
113 | [Column(TypeName = "jsonb")]
114 | public string[] Tags { get; set; }
115 | }
116 | ```
117 |
118 | ## Support for PostgreSQL 18 virtual generated columns
119 |
120 | Before PostgreSQL 18, generated (or "computed") columns could only be stored, meaning they were computed when a row is inserted or updated, and take up space on disk just like regular columns. PostgreSQL 18 introduced support for *virtual* generated columns, which are instead calculated when read, and take up no space on disk. Virtual columns can be defined with version 10 of the PostgreSQL provider as follows:
121 |
122 | ```c#
123 | protected override void OnModelCreating(ModelBuilder modelBuilder)
124 | {
125 | modelBuilder.Entity()
126 | .Property(p => p.DisplayName)
127 | .HasComputedColumnSql(@"""FirstName"" || ' ' || ""LastName""");
128 | }
129 | ```
130 |
131 | Note that previously, `stored: true` had to be specified in the above code sample; starting with version 10, omitting it causes a virtual generated column to be created (supported on PostgreSQL 18 only).
132 |
133 | For more information, [see the documentation](../modeling/generated-properties.md#computed-generated-columns).
134 |
135 | ## Support for UUIDv7
136 |
137 | By default, EF generates GUID (or UUID) values locally in .NET, rather than relying on the database to generate them. Version 9 of the PG provider already switched to generating UUIDv7 values by default ([see release note](9.0.md#uuidv7-guids-are-generated-by-default)), which are significantly better for database indexes. PostgreSQL 18 also added the [`uuidv7()`](https://www.postgresql.org/docs/18/functions-uuid.html#FUNC_UUID_GEN_TABLE) built-in function, which allows database generation of UUIDv7 values. In EFCore.PG 10, if you configure the provider to target PG 18 (`.UseNpgsql("...", o => o.SetPostgresVersion(18, 0))`), the provider will also translate [`Guid.CreateVersion7()`](https://learn.microsoft.com/dotnet/api/system.guid.createversion7) to that function.
138 |
139 | ## Support for the cube extension
140 |
141 | PostgreSQL bundles the [`cube`](https://www.postgresql.org/docs/current/cube.html) extension, which provides a multi-dimensional cube data type and associated operations. EFCore.PG 10 now fully supports the `cube` type out-of-the-box, simply add an `NpgsqlCube` property to your entity type:
142 |
143 | ```c#
144 | public class Blog
145 | {
146 | ...
147 |
148 | public NpgsqlCube Cube { get; set; }
149 | }
150 | ```
151 |
152 | Most translations over the type are supported as well. Thanks to [@kirkbrauer](https://github.com/kirkbrauer) for contributing this feature!
153 |
154 | ## Other new features
155 |
156 | * NodaTime `LocalDate.At()` and `LocalDate.AtMidnight()` are now translated.
157 |
158 | See the [10.0.0 milestone](https://github.com/npgsql/efcore.pg/milestone/68?closed=1) for the full list of Npgsql EF provider issues.
159 |
160 | ## Breaking changes
161 |
162 | * `EF.Functions.Network()` and `EF.Functions.Merge()` have been changed to return the new .NET [`IPNetwork`](https://learn.microsoft.com/dotnet/api/system.net.ipnetwork) instead of the obsolete `NpgsqlCidr`. The new `IPNetwork` type works with all other functions as well (but no breaking changes were necessary).
163 | * The PostgreSQL network type `cidr` is now scaffolded to the new .NET [`IPNetwork`](https://learn.microsoft.com/dotnet/api/system.net.ipnetwork) type. The older Npgsql representation for `cidr` - the `NpgsqlCidr` type - has been obsoleted and will be removed in a future release.
164 |
--------------------------------------------------------------------------------
/conceptual/Npgsql/release-notes/3.1.md:
--------------------------------------------------------------------------------
1 | # Npgsql 3.1 Release Notes
2 |
3 | ## Migrating from 3.0 to 3.1
4 |
5 | * CommandTimeout used to be implemented with PostgreSQL's `statement_timeout` parameter, but this wasn't a very reliable method and has been removed. CommandTimeout is now implemented via socket timeouts only, see [#689](https://github.com/npgsql/npgsql/issues/689) for more details. Note that if a socket timeout occurs, the connection is broken and must be reopened.
6 | * The [`Persist Security Info`](../connection-string-parameters.md#security-and-encryption) parameter has been implemented and is false by default. This means that once a connection has been opened, you will not be able to get its password.
7 | * Removed ContinuousProcessing mode, and replaced it with [Wait](../wait.md), a simpler and less bug-prone mechanism for consuming asynchronous notifications ([#1024](https://github.com/npgsql/npgsql/issues/1024)).
8 | * The `Maximum Pool Size` connection string parameter now defaults to 100 instead of 20 (this is the default in SqlClient, pg_bouncer...).
9 | * The `Connection Lifetime` parameter has been renamed to `Connection Idle Lifetime`, and its default has been changed from 15 to 300. Also, once the number of seconds has elapsed the connection is closed immediately; the previous behavior closed half of the connections.
10 | * `RegisterEnum` and `RegisterEnumGlobally` have been renamed to `MapEnum` and `MapEnumGlobally` respectively.
11 | * If you used enum mapping in 3.0, the strategy for translating between CLR and PostgreSQL type names has changed. In 3.0 Npgsql simply used the CLR name (e.g. SomeField) as the PostgreSQL name; Npgsql 3.1 uses a user-definable name translator, defaulting to snake case (e.g. some_field). See [#859](https://github.com/npgsql/npgsql/issues/859).
12 | * The `EnumLabel` attribute has been replaced by the `PgName` attribute (which is also used for the new composite type support).
13 | * When PostgreSQL sends an error, it is no longer raised by an NpgsqlException but by a PostgresException. PostgresException is a subclass of NpgsqlException so code catching NpgsqlException should still work, but the PostgreSQL-specific exception properties will only be available on PostgresException.
14 | * The Code property on NpgsqlException has been renamed to SqlState. It has also been moved to PostgresException.
15 | * NpgsqlNotice has been renamed to PostgresNotice
16 | * For multistatement commands, PostgreSQL parse errors will now be thrown only when the user calls NextResult() and gets to the problematic statement.
17 | * It is no longer possible to dispose a prepared statement while a reader is still open. Since disposing a prepared statement includes database interaction, the connection must be idle.
18 | * Removed `NpgsqlConnection.SupportsHexByteFormat`.
19 | * Renamed `NpgsqlConnection.Supports_E_StringPrefix` to `SupportsEStringPrefix`.
20 |
21 | ## Contributors
22 |
23 | Thank you very much to the following people who have contributed to the individual 3.1.x releases.
24 |
25 | ### [Milestone 3.1.10](https://github.com/Npgsql/Npgsql/issues?q=is%3Aissue+milestone%3A3.1.10)
26 |
27 | | Contributor | Assigned issues |
28 | | -------------------------------- | --------------------------------------------------------------------------------------------------------:|
29 | | [@roji](https://github.com/roji) | [5](https://github.com/Npgsql/Npgsql/issues?q=is%3Aissue+milestone%3A3.1.10+is%3Aclosed+assignee%3Aroji) |
30 |
31 | ### [Milestone 3.1.9](https://github.com/npgsql/npgsql/issues?q=is%3Aissue+milestone%3A3.1.9)
32 |
33 | Contributor | Assigned issues
34 | -------------------------------------------- | ----------------:|
35 | [@roji](https://github.com/roji) | [10](https://github.com/npgsql/npgsql/issues?q=is%3Aissue+milestone%3A3.1.9+is%3Aclosed+assignee%3Aroji)
36 | [@DaveVdE](https://github.com/DaveVdE) | [1](https://github.com/npgsql/npgsql/issues?q=is%3Aissue+milestone%3A3.1.9+is%3Aclosed+assignee%3ADaveVdE)
37 | [@rwasef1830](https://github.com/rwasef1830) | [1](https://github.com/npgsql/npgsql/issues?q=is%3Aissue+milestone%3A3.1.9+is%3Aclosed+assignee%3Arwasef1830)
38 |
39 | ### [Milestone 3.1.8](https://github.com/npgsql/npgsql/issues?q=is%3Aissue+milestone%3A3.1.8)
40 |
41 | Contributor | Assigned issues
42 | -------------------------------------------- | ----------------:|
43 | [@roji](https://github.com/roji) | [10](https://github.com/npgsql/npgsql/issues?q=is%3Aissue+milestone%3A3.1.8+is%3Aclosed+assignee%3Aroji)
44 |
45 | ### [Milestone 3.1.7](https://github.com/npgsql/npgsql/issues?q=is%3Aissue+milestone%3A3.1.7)
46 |
47 | Contributor | Assigned issues
48 | -------------------------------------------- | ----------------:|
49 | [@roji](https://github.com/roji) | [7](https://github.com/npgsql/npgsql/issues?q=is%3Aissue+milestone%3A3.1.7+is%3Aclosed+assignee%3Aroji)
50 |
51 | ### [Milestone 3.1.6](https://github.com/npgsql/npgsql/issues?q=is%3Aissue+milestone%3A3.1.6)
52 |
53 | Contributor | Assigned issues
54 | -------------------------------------------- | ----------------:|
55 | [@roji](https://github.com/roji) | [5](https://github.com/npgsql/npgsql/issues?q=is%3Aissue+milestone%3A3.1.6+is%3Aclosed+assignee%3Aroji)
56 |
57 | ### [Milestone 3.1.5](https://github.com/npgsql/npgsql/issues?q=is%3Aissue+milestone%3A3.1.5)
58 |
59 | Contributor | Assigned issues
60 | -------------------------------------------- | ----------------:|
61 | [@roji](https://github.com/roji) | [4](https://github.com/npgsql/npgsql/issues?q=is%3Aissue+milestone%3A3.1.5+is%3Aclosed+assignee%3Aroji)
62 |
63 | ### [Milestone 3.1.4](https://github.com/npgsql/npgsql/issues?q=is%3Aissue+milestone%3A3.1.4)
64 |
65 | Contributor | Assigned issues
66 | -------------------------------------------- | ----------------:|
67 | [@roji](https://github.com/roji) | [2](https://github.com/npgsql/npgsql/issues?q=is%3Aissue+milestone%3A3.1.4+is%3Aclosed+assignee%3Aroji)
68 |
69 | ### [Milestone 3.1.3](https://github.com/npgsql/npgsql/issues?q=is%3Aissue+milestone%3A3.1.3)
70 |
71 | Contributor | Assigned issues
72 | -------------------------------------------- | ----------------:|
73 | [@roji](https://github.com/roji) | [10](https://github.com/npgsql/npgsql/issues?q=is%3Aissue+milestone%3A3.1.3+is%3Aclosed+assignee%3Aroji)
74 |
75 | ### [Milestone 3.1.2](https://github.com/npgsql/npgsql/issues?q=is%3Aissue+milestone%3A3.1.2)
76 |
77 | Contributor | Assigned issues
78 | -------------------------------------------- | ----------------:|
79 | [@roji](https://github.com/roji) | [1](https://github.com/npgsql/npgsql/issues?q=is%3Aissue+milestone%3A3.1.2+is%3Aclosed+assignee%3Aroji)
80 |
81 | ### [Milestone 3.1.10](https://github.com/npgsql/npgsql/issues?q=is%3Aissue+milestone%3A3.1.10)
82 |
83 | Contributor | Assigned issues
84 | -------------------------------------------- | ----------------:|
85 | [@roji](https://github.com/roji) | [5](https://github.com/npgsql/npgsql/issues?q=is%3Aissue+milestone%3A3.1.10+is%3Aclosed+assignee%3Aroji)
86 |
87 | ### [Milestone 3.1.1](https://github.com/npgsql/npgsql/issues?q=is%3Aissue+milestone%3A3.1.1)
88 |
89 | Contributor | Assigned issues
90 | -------------------------------------------- | ----------------:|
91 | [@roji](https://github.com/roji) | [5](https://github.com/npgsql/npgsql/issues?q=is%3Aissue+milestone%3A3.1.1+is%3Aclosed+assignee%3Aroji)
92 |
93 | ### [Milestone 3.1.0](https://github.com/npgsql/npgsql/issues?q=is%3Aissue+milestone%3A3.1.0)
94 |
95 | Contributor | Assigned issues
96 | -------------------------------------------- | ----------------:|
97 | [@roji](https://github.com/roji) | [24](https://github.com/npgsql/npgsql/issues?q=is%3Aissue+milestone%3A3.1.0+is%3Aclosed+assignee%3Aroji)
98 | [@Emill](https://github.com/Emill) | [2](https://github.com/npgsql/npgsql/issues?q=is%3Aissue+milestone%3A3.1.0+is%3Aclosed+assignee%3AEmill)
99 |
--------------------------------------------------------------------------------
/conceptual/EF6.PG/index.md:
--------------------------------------------------------------------------------
1 | # Entity Framework 6
2 |
3 | Npgsql has an Entity Framework 6 provider. You can use it by installing the
4 | [EntityFramework6.Npgsql](https://www.nuget.org/packages/EntityFramework6.Npgsql/) nuget.
5 |
6 | ## Basic Configuration
7 |
8 | Configuration for an Entity Framework application can be specified in a config file (app.config/web.config) or through code. The latter is known as code-based configuration.
9 |
10 | ### Code-based
11 |
12 | To use Entity Framework with Npgsql, define a class that inherits from `DbConfiguration` in the same assembly as your class inheriting `DbContext`. Ensure that you configure provider services, a provider factory, a default connection factory as shown below:
13 |
14 | ```csharp
15 | using Npgsql;
16 | using System.Data.Entity;
17 |
18 | class NpgSqlConfiguration : DbConfiguration
19 | {
20 | public NpgSqlConfiguration()
21 | {
22 | var name = "Npgsql";
23 |
24 | SetProviderFactory(providerInvariantName: name,
25 | providerFactory: NpgsqlFactory.Instance);
26 |
27 | SetProviderServices(providerInvariantName: name,
28 | provider: NpgsqlServices.Instance);
29 |
30 | SetDefaultConnectionFactory(connectionFactory: new NpgsqlConnectionFactory());
31 | }
32 | }
33 | ```
34 |
35 | ### Config file
36 |
37 | When installing `EntityFramework6.Npgsql` nuget package, the relevant sections in `App.config` / `Web.config` are usually automatically updated. You typically only have to add your `connectionString` with the correct `providerName`.
38 |
39 | ```xml
40 |
41 |
42 |
43 |
44 |
45 |
46 |
47 |
48 |
49 |
50 |
51 |
52 |
53 |
54 |
55 |
56 |
57 | ```
58 |
59 | ## Guid Support
60 |
61 | Npgsql EF migrations support uses `uuid_generate_v4()` function to generate guids.
62 | In order to have access to this function, you have to install the extension uuid-ossp through the following command:
63 |
64 | ```sql
65 | create extension "uuid-ossp";
66 | ```
67 |
68 | If you don't have this extension installed, when you run Npgsql migrations you will get the following error message:
69 |
70 | ```text
71 | ERROR: function uuid_generate_v4() does not exist
72 | ```
73 |
74 | If the database is being created by Npgsql Migrations, you will need to
75 | [run the `create extension` command in the `template1` database](http://stackoverflow.com/a/11584751).
76 | This way, when the new database is created, the extension will be installed already.
77 |
78 | ## Optimistic Concurrency
79 |
80 | EntityFramework supports [optimistic concurrency](https://docs.microsoft.com/en-us/aspnet/mvc/overview/getting-started/getting-started-with-ef-using-mvc/handling-concurrency-with-the-entity-framework-in-an-asp-net-mvc-application), through the [system column `xmin`](https://www.postgresql.org/docs/current/ddl-system-columns.html). To use this column as the concurrency token, some [customization is needed](https://github.com/npgsql/EntityFramework6.Npgsql/issues/8). The following code will setup `Department.Version` to map to `xmin`, while the `SqlGenerator` will generate `CREATE/ALTER TABLE` statements omitting system columns.
81 |
82 | ```csharp
83 | public class Department {
84 | public string Version { get; private set; }
85 | }
86 |
87 | [DbConfigurationType(typeof(Configuration))]
88 | public class UniversityDbContext : DbContext
89 | {
90 | public DbSet Departments { get; set; }
91 |
92 | protected override void OnModelCreating(DbModelBuilder modelBuilder)
93 | {
94 | modelBuilder.Entity()
95 | .Property(p => p.Version)
96 | .HasColumnName("xmin")
97 | .HasColumnType("text")
98 | .IsConcurrencyToken()
99 | .HasDatabaseGeneratedOption(DatabaseGeneratedOption.Computed);
100 | base.OnModelCreating(modelBuilder);
101 | }
102 | }
103 |
104 | internal class Configuration : DbConfiguration
105 | {
106 | public Configuration()
107 | {
108 | SetMigrationSqlGenerator("Npgsql", () => new SqlGenerator());
109 | }
110 | }
111 |
112 | public class SqlGenerator : NpgsqlMigrationSqlGenerator
113 | {
114 | private readonly string[] systemColumnNames = { "oid", "tableoid", "xmin", "cmin", "xmax", "cmax", "ctid" };
115 |
116 | protected override void Convert(CreateTableOperation createTableOperation)
117 | {
118 | var systemColumns = createTableOperation.Columns.Where(x => systemColumnNames.Contains(x.Name)).ToArray();
119 | foreach (var systemColumn in systemColumns)
120 | createTableOperation.Columns.Remove(systemColumn);
121 | base.Convert(createTableOperation);
122 | }
123 | }
124 | ```
125 |
126 | ## Template Database
127 |
128 | When the Entity Framework 6 provider creates a database, it issues a simple `CREATE DATABASE` command.
129 | In PostgreSQL, this implicitly uses `template1` as the template - anything existing in `template1` will
130 | be copied to your new database. If you wish to change the database used as a template, you can specify
131 | the `EF Template Database` connection string parameter. For more info see the
132 | [PostgreSQL docs](https://www.postgresql.org/docs/current/static/sql-createdatabase.html).
133 |
134 | ## Customizing DataReader Behavior
135 |
136 | You can use [an Entity Framework 6 IDbCommandInterceptor](https://msdn.microsoft.com/library/dn469464(v=vs.113).aspx) to wrap the `DataReader` instance returned by Npgsql when Entity Framework executes queries. This is possible using a ```DbConfiguration``` class.
137 |
138 | Example use cases:
139 |
140 | - Forcing all returned ```DateTime``` and ```DateTimeOffset``` values to be in the UTC timezone.
141 | - Preventing accidental insertion of DateTime values having ```DateTimeKind.Unspecified```.
142 | - Forcing all postgres date/time types to be returned to Entity Framework as ```DateTimeOffset```.
143 |
144 | ```csharp
145 | [DbConfigurationType(typeof(AppDbContextConfiguration))]
146 | public class AppDbContext : DbContext
147 | {
148 | // ...
149 | }
150 |
151 | public class AppDbContextConfiguration : DbConfiguration
152 | {
153 | public AppDbContextConfiguration()
154 | {
155 | this.AddInterceptor(new MyEntityFrameworkInterceptor());
156 | }
157 | }
158 |
159 | class MyEntityFrameworkInterceptor : DbCommandInterceptor
160 | {
161 | public override void ReaderExecuted(
162 | DbCommand command,
163 | DbCommandInterceptionContext interceptionContext)
164 | {
165 | if (interceptionContext.Result == null) return;
166 | interceptionContext.Result = new WrappingDbDataReader(interceptionContext.Result);
167 | }
168 |
169 | public override void ScalarExecuted(
170 | DbCommand command,
171 | DbCommandInterceptionContext interceptionContext)
172 | {
173 | interceptionContext.Result = ModifyReturnValues(interceptionContext.Result);
174 | }
175 |
176 | static object ModifyReturnValues(object result)
177 | {
178 | // Transform and then
179 | return result;
180 | }
181 | }
182 |
183 | class WrappingDbDataReader : DbDataReader, IDataReader
184 | {
185 | // Wrap an existing DbDataReader, proxy all calls to the underlying instance,
186 | // modify return values and/or parameters as needed...
187 | public WrappingDbDataReader(DbDataReader reader)
188 | {
189 | }
190 | }
191 | ```
192 |
--------------------------------------------------------------------------------