├── .gitignore
├── .travis.yml
├── DominionEnterprises.Mongo.Tests
│   ├── DominionEnterprises.Mongo.Tests.csproj
│   ├── QueueTests.cs
│   └── app.config
├── DominionEnterprises.Mongo
│   ├── DominionEnterprises.Mongo.csproj
│   └── Queue.cs
├── LICENSE
├── README.md
├── build.bat
├── build.sh
├── libs
│   ├── MongoDB.Bson.dll
│   ├── MongoDB.Driver.dll
│   └── nunit.framework.dll
├── tools.SharpCover
│   ├── Counter.dll
│   ├── Mono.Cecil.Mdb.dll
│   ├── Mono.Cecil.Pdb.dll
│   ├── Mono.Cecil.Rocks.dll
│   ├── Mono.Cecil.dll
│   ├── Newtonsoft.Json.dll
│   └── SharpCover.exe
└── travisCoverageConfig.json
/.gitignore:
--------------------------------------------------------------------------------
1 | bin/
2 | obj/
3 | TestResult.xml
4 | *~
5 | coverageResults.txt
6 |
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | language: c
2 | services: mongodb
3 | install:
4 | - sudo apt-get install mono-devel nunit-console
5 | script: sh build.sh
6 |
--------------------------------------------------------------------------------
/DominionEnterprises.Mongo.Tests/DominionEnterprises.Mongo.Tests.csproj:
--------------------------------------------------------------------------------
1 |
2 |
3 | Library
4 | DominionEnterprises.Mongo.Tests
5 | 4
6 |
7 |
8 |
9 |
10 | ..\libs\nunit.framework.dll
11 |
12 |
13 | ..\libs\MongoDB.Bson.dll
14 |
15 |
16 | ..\libs\MongoDB.Driver.dll
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
--------------------------------------------------------------------------------
/DominionEnterprises.Mongo.Tests/QueueTests.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Configuration;
3 | using System.Threading;
4 | using NUnit.Framework;
5 | using MongoDB.Bson;
6 | using MongoDB.Driver;
7 | using MongoDB.Driver.GridFS;
8 | using System.IO;
9 | using System.Collections.Generic;
10 |
11 | namespace DominionEnterprises.Mongo.Tests
12 | {
13 | [TestFixture]
14 | public class QueueTests
15 | {
16 | private MongoCollection collection;
17 | private MongoGridFS gridfs;
18 | private Queue queue;
19 |
20 | [SetUp]
21 | public virtual void Setup()
22 | {
23 | collection = new MongoClient(ConfigurationManager.AppSettings["mongoQueueUrl"])
24 | .GetServer()
25 | .GetDatabase(ConfigurationManager.AppSettings["mongoQueueDb"])
26 | .GetCollection(ConfigurationManager.AppSettings["mongoQueueCollection"]);
27 |
28 | collection.Drop();
29 |
30 | gridfs = collection.Database.GetGridFS(MongoGridFSSettings.Defaults);
31 | gridfs.Files.Drop();
32 | gridfs.Chunks.Drop();
33 |
34 | queue = new Queue();
35 | }
36 |
37 | #region construct
38 | [Test]
39 | [ExpectedException(typeof(ArgumentNullException))]
40 | public void ConstructWithNullUrl()
41 | {
42 | new Queue(null, string.Empty, string.Empty);
43 | }
44 |
45 | [Test]
46 | [ExpectedException(typeof(ArgumentNullException))]
47 | public void ConstructWithNullDb()
48 | {
49 | new Queue(string.Empty, null, string.Empty);
50 | }
51 |
52 | [Test]
53 | [ExpectedException(typeof(ArgumentNullException))]
54 | public void ConstructWithNullCollection()
55 | {
56 | new Queue(string.Empty, string.Empty, null);
57 | }
58 |
59 | [Test]
60 | [ExpectedException(typeof(ArgumentNullException))]
61 | public void ConstructWithNullCollectionObject()
62 | {
63 | new Queue(null);
64 | }
65 |
66 | [Test]
67 | public void ConstructCollectionObject()
68 | {
69 | var collection = new MongoClient(ConfigurationManager.AppSettings["mongoQueueUrl"])
70 | .GetServer()
71 | .GetDatabase(ConfigurationManager.AppSettings["mongoQueueDb"])
72 | .GetCollection(ConfigurationManager.AppSettings["mongoQueueCollection"]);
73 | new Queue(collection);
74 | }
75 | #endregion
76 |
77 | #region EnsureGetIndex
78 | [Test]
79 | public void EnsureGetIndex()
80 | {
81 | queue.EnsureGetIndex(new IndexKeysDocument("type", 1), new IndexKeysDocument("boo", -1));
82 | queue.EnsureGetIndex(new IndexKeysDocument("another.sub", 1));
83 |
84 | Assert.AreEqual(4, collection.GetIndexes().Count);
85 |
86 | var expectedOne = new IndexKeysDocument { { "running", 1 }, { "payload.type", 1 }, { "priority", 1 }, { "created", 1 }, { "payload.boo", -1 }, { "earliestGet", 1 } };
87 | Assert.AreEqual(expectedOne, collection.GetIndexes()[1].Key);
88 |
89 | var expectedTwo = new IndexKeysDocument { { "running", 1 }, { "resetTimestamp", 1 } };
90 | Assert.AreEqual(expectedTwo, collection.GetIndexes()[2].Key);
91 |
92 | var expectedThree = new IndexKeysDocument { { "running", 1 }, { "payload.another.sub", 1 }, { "priority", 1 }, { "created", 1 }, { "earliestGet", 1 } };
93 | Assert.AreEqual(expectedThree, collection.GetIndexes()[3].Key);
94 | }
95 |
96 | [Test]
97 | public void EnsureGetIndexWithNoArgs()
98 | {
99 | queue.EnsureGetIndex();
100 |
101 | Assert.AreEqual(3, collection.GetIndexes().Count);
102 |
103 | var expectedOne = new IndexKeysDocument { { "running", 1 }, { "priority", 1 }, { "created", 1 }, { "earliestGet", 1 } };
104 | Assert.AreEqual(expectedOne, collection.GetIndexes()[1].Key);
105 |
106 | var expectedTwo = new IndexKeysDocument { { "running", 1 }, { "resetTimestamp", 1 } };
107 | Assert.AreEqual(expectedTwo, collection.GetIndexes()[2].Key);
108 | }
109 |
110 | [Test]
111 | [ExpectedException(typeof(Exception))]
112 | public void EnsureGetIndexWithTooLongCollectionName()
113 | {
114 | //121 chars
115 | var collectionName = "messages01234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012";
116 |
117 | queue = new Queue(ConfigurationManager.AppSettings["mongoQueueUrl"], ConfigurationManager.AppSettings["mongoQueueDb"], collectionName);
118 | queue.EnsureGetIndex(new IndexKeysDocument());
119 | }
120 |
121 | [Test]
122 | [ExpectedException(typeof(ArgumentException))]
123 | public void EnsureGetIndexWithBadBeforeSortValue()
124 | {
125 | queue.EnsureGetIndex(new IndexKeysDocument("field", "NotAnInt"));
126 | }
127 |
128 | [Test]
129 | [ExpectedException(typeof(ArgumentException))]
130 | public void EnsureGetIndexWithBadAfterSortValue()
131 | {
132 | queue.EnsureGetIndex(new IndexKeysDocument(), new IndexKeysDocument("field", "NotAnInt"));
133 | }
134 |
135 | [Test]
136 | [ExpectedException(typeof(ArgumentNullException))]
137 | public void EnsureGetIndexWithNullBeforeSort()
138 | {
139 | queue.EnsureGetIndex(null, new IndexKeysDocument());
140 | }
141 |
142 | [Test]
143 | [ExpectedException(typeof(ArgumentNullException))]
144 | public void EnsureGetIndexWithNullAfterSort()
145 | {
146 | queue.EnsureGetIndex(new IndexKeysDocument(), null);
147 | }
148 | #endregion
149 |
150 | #region EnsureCountIndex
151 | [Test]
152 | public void EnsureCountIndex()
153 | {
154 | queue.EnsureCountIndex(new IndexKeysDocument { { "type", 1 }, { "boo", -1 } }, false);
155 | queue.EnsureCountIndex(new IndexKeysDocument { { "another.sub", 1 } }, true);
156 |
157 | Assert.AreEqual(3, collection.GetIndexes().Count);
158 |
159 | var expectedOne = new IndexKeysDocument { { "payload.type", 1 }, { "payload.boo", -1 } };
160 | Assert.AreEqual(expectedOne, collection.GetIndexes()[1].Key);
161 |
162 | var expectedTwo = new IndexKeysDocument { { "running", 1 }, { "payload.another.sub", 1 } };
163 | Assert.AreEqual(expectedTwo, collection.GetIndexes()[2].Key);
164 | }
165 |
166 | [Test]
167 | public void EnsureCountIndexWithPrefixOfPrevious()
168 | {
169 | queue.EnsureCountIndex(new IndexKeysDocument { { "type", 1 }, { "boo", -1 } }, false);
170 | queue.EnsureCountIndex(new IndexKeysDocument { { "type", 1 } }, false);
171 |
172 | Assert.AreEqual(2, collection.GetIndexes().Count);
173 |
174 | var expectedOne = new IndexKeysDocument { { "payload.type", 1 }, { "payload.boo", -1 } };
175 | Assert.AreEqual(expectedOne, collection.GetIndexes()[1].Key);
176 | }
177 |
178 | [Test]
179 | [ExpectedException(typeof(ArgumentException))]
180 | public void EnsureCountIndexWithBadValue()
181 | {
182 | queue.EnsureCountIndex(new IndexKeysDocument("field", "NotAnInt"), true);
183 | }
184 |
185 | [Test]
186 | [ExpectedException(typeof(ArgumentNullException))]
187 | public void EnsureCountIndexWithNull()
188 | {
189 | queue.EnsureCountIndex(null, true);
190 | }
191 | #endregion
192 |
193 | #region Get
194 | [Test]
195 | [ExpectedException(typeof(ArgumentNullException))]
196 | public void GetWithNullQuery()
197 | {
198 | queue.Get(null, TimeSpan.MaxValue);
199 | }
200 |
201 | [Test]
202 | public void GetByBadQuery()
203 | {
204 | queue.Send(new BsonDocument { { "key1", 0 }, { "key2", true } });
205 |
206 | var message = queue.Get(new QueryDocument { { "key1", 0 }, { "key2", false } }, TimeSpan.MaxValue, TimeSpan.Zero);
207 | Assert.IsNull(message);
208 |
209 | Assert.AreEqual(1, collection.Count());
210 | }
211 |
212 | [Test]
213 | public void GetByFullQuery()
214 | {
215 | var messageOne = new BsonDocument { { "id", "SHOULD NOT BE AFFECTED" }, { "key1", 0 }, { "key2", true } };
216 |
217 | using (var streamOne = new MemoryStream())
218 | using (var streamTwo = new MemoryStream())
219 | {
220 | streamOne.WriteByte(111);
221 | streamTwo.WriteByte(222);
222 | streamOne.Position = 0;
223 | streamTwo.Position = 0;
224 | queue.Send(messageOne, DateTime.Now, 0.0, new Dictionary<string, Stream> { { "one", streamOne }, { "two", streamTwo } });
225 | }
226 | queue.Send(new BsonDocument("key", "value"));
227 |
228 | var result = queue.Get(new QueryDocument(messageOne), TimeSpan.FromHours(1), TimeSpan.MinValue);
229 |
230 | Assert.AreEqual(messageOne, result.Payload);
231 | Assert.AreEqual(111, result.Streams["one"].ReadByte());
232 | Assert.AreEqual(222, result.Streams["two"].ReadByte());
233 | }
234 |
235 | [Test]
236 | public void GetBySubDocQuery()
237 | {
238 | var messageTwo = new BsonDocument
239 | {
240 | {
241 | "one",
242 | new BsonDocument
243 | {
244 | { "two", new BsonDocument { { "three", 5 }, { "notused", "notused" } } },
245 | { "notused", "notused" },
246 | }
247 | },
248 | { "notused", "notused" },
249 | };
250 |
251 | queue.Send(new BsonDocument { { "key1", 0 }, { "key2", true } });
252 | queue.Send(messageTwo);
253 |
254 | var result = queue.Get(new QueryDocument("one.two.three", new BsonDocument("$gt", 4)), TimeSpan.MaxValue, TimeSpan.MaxValue, TimeSpan.MinValue, false);
255 |
256 | Assert.AreEqual(messageTwo, result.Payload);
257 | }
258 |
259 | [Test]
260 | public void GetBeforeAck()
261 | {
262 | var messageOne = new BsonDocument { { "key1", 0 }, { "key2", true } };
263 |
264 | queue.Send(messageOne);
265 | queue.Send(new BsonDocument("key", "value"));
266 |
267 | queue.Get(new QueryDocument(messageOne), TimeSpan.MaxValue, TimeSpan.Zero);
268 |
269 | //try to get a message we already have, before ack
270 | var result = queue.Get(new QueryDocument(messageOne), TimeSpan.MaxValue, TimeSpan.Zero);
271 | Assert.IsNull(result);
272 | }
273 |
274 | [Test]
275 | public void GetWithCustomPriority()
276 | {
277 | var messageOne = new BsonDocument { { "key", 0 } };
278 | var messageTwo = new BsonDocument { { "key", 1 } };
279 | var messageThree = new BsonDocument { { "key", 2 } };
280 |
281 | queue.Send(messageOne, DateTime.Now, 0.5);
282 | queue.Send(messageTwo, DateTime.Now, 0.4);
283 | queue.Send(messageThree, DateTime.Now, 0.3);
284 |
285 | var resultOne = queue.Get(new QueryDocument(), TimeSpan.MaxValue);
286 | var resultTwo = queue.Get(new QueryDocument(), TimeSpan.MaxValue);
287 | var resultThree = queue.Get(new QueryDocument(), TimeSpan.MaxValue);
288 |
289 | Assert.AreEqual(messageThree, resultOne.Payload);
290 | Assert.AreEqual(messageTwo, resultTwo.Payload);
291 | Assert.AreEqual(messageOne, resultThree.Payload);
292 | }
293 |
294 | [Test]
295 | public void GetWithTimeBasedPriority()
296 | {
297 | var messageOne = new BsonDocument { { "key", 0 } };
298 | var messageTwo = new BsonDocument { { "key", 1 } };
299 | var messageThree = new BsonDocument { { "key", 2 } };
300 |
301 | queue.Send(messageOne);
302 | queue.Send(messageTwo);
303 | queue.Send(messageThree);
304 |
305 | var resultOne = queue.Get(new QueryDocument(), TimeSpan.MaxValue);
306 | var resultTwo = queue.Get(new QueryDocument(), TimeSpan.MaxValue);
307 | var resultThree = queue.Get(new QueryDocument(), TimeSpan.MaxValue);
308 |
309 | Assert.AreEqual(messageOne, resultOne.Payload);
310 | Assert.AreEqual(messageTwo, resultTwo.Payload);
311 | Assert.AreEqual(messageThree, resultThree.Payload);
312 | }
313 |
314 | [Test]
315 | public void GetWithTimeBasedPriorityAndOldTimestamp()
316 | {
317 | var messageOne = new BsonDocument { { "key", 0 } };
318 | var messageTwo = new BsonDocument { { "key", 1 } };
319 | var messageThree = new BsonDocument { { "key", 2 } };
320 |
321 | queue.Send(messageOne);
322 | queue.Send(messageTwo);
323 | queue.Send(messageThree);
324 |
325 | var resultTwo = queue.Get(new QueryDocument(), TimeSpan.MaxValue);
326 | //ensure that keeping the old timestamp preserves the normal time order of Send()s
327 | queue.AckSend(resultTwo.Handle, resultTwo.Payload, DateTime.UtcNow, 0.0, false);
328 |
329 | var resultOne = queue.Get(new QueryDocument(), TimeSpan.MaxValue);
330 | resultTwo = queue.Get(new QueryDocument(), TimeSpan.MaxValue);
331 | var resultThree = queue.Get(new QueryDocument(), TimeSpan.MaxValue);
332 |
333 | Assert.AreEqual(messageOne, resultOne.Payload);
334 | Assert.AreEqual(messageTwo, resultTwo.Payload);
335 | Assert.AreEqual(messageThree, resultThree.Payload);
336 | }
337 |
338 | [Test]
339 | public void GetWait()
340 | {
341 | var start = DateTime.Now;
342 |
343 | queue.Get(new QueryDocument(), TimeSpan.MaxValue, TimeSpan.FromMilliseconds(200), TimeSpan.FromMilliseconds(201), false);
344 |
345 | var end = DateTime.Now;
346 |
347 | Assert.IsTrue(end - start >= TimeSpan.FromMilliseconds(200));
348 | Assert.IsTrue(end - start < TimeSpan.FromMilliseconds(400));
349 |
350 | start = DateTime.Now;
351 |
352 | queue.Get(new QueryDocument(), TimeSpan.MaxValue, TimeSpan.FromMilliseconds(200), TimeSpan.MinValue, false);
353 |
354 | end = DateTime.Now;
355 |
356 | Assert.IsTrue(end - start >= TimeSpan.FromMilliseconds(200));
357 | Assert.IsTrue(end - start < TimeSpan.FromMilliseconds(400));
358 | }
359 |
360 | [Test]
361 | public void GetApproximateWait()
362 | {
363 | var min = double.MaxValue;
364 | var max = double.MinValue;
365 | for (var i = 0; i < 10; ++i)
366 | {
367 | var start = DateTime.Now;
368 |
369 | queue.Get(new QueryDocument(), TimeSpan.MaxValue, TimeSpan.FromMilliseconds(100), TimeSpan.MinValue, true);
370 |
371 | var time = (DateTime.Now - start).TotalMilliseconds;
372 | Assert.IsTrue(time >= 80.0);//minus 0.1 of 100
373 | Assert.IsTrue(time < 200.0);
374 |
375 | min = Math.Min(min, time);
376 | max = Math.Max(max, time);
377 | }
378 |
379 | Assert.IsTrue(min < 100.0);
380 | Assert.IsTrue(max > 100.0);
381 | }
382 |
383 | [Test]
384 | public void EarliestGet()
385 | {
386 | var messageOne = new BsonDocument { { "key1", 0 }, { "key2", true } };
387 |
388 | queue.Send(messageOne, DateTime.Now + TimeSpan.FromMilliseconds(200));
389 |
390 | var resultBefore = queue.Get(new QueryDocument(messageOne), TimeSpan.MaxValue, TimeSpan.Zero);
391 | Assert.IsNull(resultBefore);
392 |
393 | Thread.Sleep(200);
394 |
395 | var resultAfter = queue.Get(new QueryDocument(messageOne), TimeSpan.MaxValue, TimeSpan.Zero);
396 | Assert.IsNotNull(resultAfter);
397 | }
398 | #endregion
399 |
400 | [Test]
401 | public void ResetStuck()
402 | {
403 | var messageOne = new BsonDocument("key", 0);
404 | var messageTwo = new BsonDocument("key", 1);
405 |
406 | queue.Send(messageOne);
407 | queue.Send(messageTwo);
408 |
409 | //sets to running
410 | collection.Update(new QueryDocument("payload.key", 0), new UpdateDocument("$set", new BsonDocument { { "running", true }, { "resetTimestamp", DateTime.UtcNow } }));
411 | collection.Update(new QueryDocument("payload.key", 1), new UpdateDocument("$set", new BsonDocument { { "running", true }, { "resetTimestamp", DateTime.UtcNow } }));
412 |
413 | Assert.AreEqual(2, collection.Count(new QueryDocument("running", true)));
414 |
415 | //sets resetTimestamp on messageOne
416 | queue.Get(new QueryDocument(messageOne), TimeSpan.MinValue, TimeSpan.Zero);
417 |
418 | //resets and gets messageOne
419 | Assert.IsNotNull(queue.Get(new QueryDocument(messageOne), TimeSpan.MaxValue, TimeSpan.Zero));
420 |
421 | Assert.AreEqual(1, collection.Count(new QueryDocument("running", false)));
422 | }
423 |
424 | #region Count
425 | [Test]
426 | public void Count()
427 | {
428 | var message = new BsonDocument("boo", "scary");
429 |
430 | Assert.AreEqual(0, queue.Count(new QueryDocument(message), true));
431 | Assert.AreEqual(0, queue.Count(new QueryDocument(message), false));
432 | Assert.AreEqual(0, queue.Count(new QueryDocument(message)));
433 |
434 | queue.Send(message);
435 | Assert.AreEqual(1, queue.Count(new QueryDocument(message), false));
436 | Assert.AreEqual(0, queue.Count(new QueryDocument(message), true));
437 | Assert.AreEqual(1, queue.Count(new QueryDocument(message)));
438 |
439 | queue.Get(new QueryDocument(message), TimeSpan.MaxValue, TimeSpan.Zero);
440 | Assert.AreEqual(0, queue.Count(new QueryDocument(message), false));
441 | Assert.AreEqual(1, queue.Count(new QueryDocument(message), true));
442 | Assert.AreEqual(1, queue.Count(new QueryDocument(message)));
443 | }
444 |
445 | [Test]
446 | [ExpectedException(typeof(ArgumentNullException))]
447 | public void CountWithAbsentRunningAndNullQuery()
448 | {
449 | queue.Count(null);
450 | }
451 |
452 | [Test]
453 | [ExpectedException(typeof(ArgumentNullException))]
454 | public void CountWithRunningAndNullQuery()
455 | {
456 | queue.Count(null, true);
457 | }
458 | #endregion
459 |
460 | #region Ack
461 | [Test]
462 | public void Ack()
463 | {
464 | var messageOne = new BsonDocument { { "key1", 0 }, { "key2", true } };
465 |
466 | using (var streamOne = new MemoryStream())
467 | using (var streamTwo = new MemoryStream())
468 | {
469 | streamOne.WriteByte(111);
470 | streamTwo.WriteByte(222);
471 | streamOne.Position = 0;
472 | streamTwo.Position = 0;
473 | queue.Send(messageOne, DateTime.Now, 0.0, new Dictionary<string, Stream> { { "one", streamOne }, { "two", streamTwo } });
474 | }
475 | queue.Send(new BsonDocument("key", "value"));
476 |
477 | var result = queue.Get(new QueryDocument(messageOne), TimeSpan.MaxValue, TimeSpan.Zero);
478 | Assert.AreEqual(2, collection.Count());
479 |
480 | queue.Ack(result.Handle);
481 | Assert.AreEqual(1, collection.Count());
482 | Assert.AreEqual(0, gridfs.Files.Count());
483 | Assert.AreEqual(0, gridfs.Chunks.Count());
484 | }
485 |
486 | [Test]
487 | [ExpectedException(typeof(ArgumentNullException))]
488 | public void AckWithNullHandle()
489 | {
490 | queue.Ack(null);
491 | }
492 | #endregion
493 |
494 | #region AckMulti
495 | [Test]
496 | public void AckMultiMoreThanBatch()
497 | {
498 | using (var streamOne = new MemoryStream())
499 | using (var streamTwo = new MemoryStream())
500 | {
501 | streamOne.WriteByte(0);
502 | streamTwo.WriteByte(1);
503 |
504 | for (var i = 0; i < Queue.ACK_MULTI_BATCH_SIZE; ++i)
505 | {
506 | var message = new BsonDocument("key", i);
507 |
508 | streamOne.Position = 0;
509 | streamTwo.Position = 0;
510 | queue.Send(message, DateTime.Now, 0.0, new Dictionary<string, Stream> { { "one", streamOne }, { "two", streamTwo } });
511 | }
512 | }
513 |
514 | queue.Send(new BsonDocument("key", "value"));
515 |
516 | var handles = new Handle[Queue.ACK_MULTI_BATCH_SIZE];
517 | for (var i = 0; i < handles.Length; ++i)
518 | handles[i] = queue.Get(new QueryDocument("key", i), TimeSpan.MaxValue, TimeSpan.Zero).Handle;
519 |
520 | Assert.AreEqual(Queue.ACK_MULTI_BATCH_SIZE + 1, collection.Count());
521 | Assert.AreEqual(Queue.ACK_MULTI_BATCH_SIZE * 2, gridfs.Files.Count());
522 | Assert.AreEqual(Queue.ACK_MULTI_BATCH_SIZE * 2, gridfs.Chunks.Count());
523 |
524 | queue.AckMulti(handles);
525 |
526 | Assert.AreEqual(1, collection.Count());
527 | Assert.AreEqual(0, gridfs.Files.Count());
528 | Assert.AreEqual(0, gridfs.Chunks.Count());
529 | }
530 |
531 | [Test]
532 | public void AckMultiLessThanBatch()
533 | {
534 | using (var streamOne = new MemoryStream())
535 | using (var streamTwo = new MemoryStream())
536 | {
537 | streamOne.WriteByte(0);
538 | streamTwo.WriteByte(1);
539 | streamOne.Position = 0;
540 | streamTwo.Position = 0;
541 | queue.Send(new BsonDocument("key", 0), DateTime.Now, 0.0, new Dictionary{ { "one", streamOne }, { "two", streamTwo }});
542 | }
543 |
544 | queue.Send(new BsonDocument("key", 1));
545 | queue.Send(new BsonDocument("key", 2));
546 |
547 | var handles = new []
548 | {
549 | queue.Get(new QueryDocument("key", 0), TimeSpan.MaxValue, TimeSpan.Zero).Handle,
550 | queue.Get(new QueryDocument("key", 1), TimeSpan.MaxValue, TimeSpan.Zero).Handle,
551 | };
552 |
553 | Assert.AreEqual(3, collection.Count());
554 | Assert.AreEqual(2, gridfs.Files.Count());
555 | Assert.AreEqual(2, gridfs.Chunks.Count());
556 |
557 | queue.AckMulti(handles);
558 |
559 | Assert.AreEqual(1, collection.Count());
560 | Assert.AreEqual(0, gridfs.Files.Count());
561 | Assert.AreEqual(0, gridfs.Chunks.Count());
562 | }
563 |
564 | [Test]
565 | [ExpectedException(typeof(ArgumentNullException))]
566 | public void AckMultiWithNullHandles()
567 | {
568 | queue.AckMulti(null);
569 | }
570 | #endregion
571 |
572 | #region AckSend
573 | [Test]
574 | public void AckSend()
575 | {
576 | var messageOne = new BsonDocument { { "key1", 0 }, { "key2", true } };
577 | var messageThree = new BsonDocument { { "hi", "there" }, { "rawr", 2 } };
578 |
579 | using (var streamOne = new MemoryStream())
580 | using (var streamTwo = new MemoryStream())
581 | {
582 | streamOne.WriteByte(11);
583 | streamTwo.WriteByte(22);
584 | streamOne.Position = 0;
585 | streamTwo.Position = 0;
586 | queue.Send(messageOne, DateTime.Now, 0.0, new Dictionary<string, Stream> { { "one", streamOne }, { "two", streamTwo } });
587 | }
588 | queue.Send(new BsonDocument("key", "value"));
589 |
590 | var resultOne = queue.Get(new QueryDocument(messageOne), TimeSpan.MaxValue, TimeSpan.Zero);
591 | Assert.AreEqual(2, collection.Count());
592 |
593 | using (var streamOne = new MemoryStream())
594 | using (var streamTwo = new MemoryStream())
595 | {
596 | streamOne.WriteByte(111);
597 | streamTwo.WriteByte(222);
598 | streamOne.Position = 0;
599 | streamTwo.Position = 0;
600 | queue.AckSend(resultOne.Handle, messageThree, DateTime.Now, 0.0, true, new Dictionary<string, Stream> { { "one", streamOne }, { "two", streamTwo } });
601 | }
602 | Assert.AreEqual(2, collection.Count());
603 |
604 | var actual = queue.Get(new QueryDocument("hi", "there"), TimeSpan.MaxValue, TimeSpan.Zero);
605 | Assert.AreEqual(messageThree, actual.Payload);
606 |
607 | Assert.AreEqual(111, actual.Streams["one"].ReadByte());
608 | Assert.AreEqual(222, actual.Streams["two"].ReadByte());
609 |
610 | Assert.AreEqual(2, gridfs.Files.Count());
611 | Assert.AreEqual(2, gridfs.Chunks.Count());
612 | }
613 |
614 | [Test]
615 | public void AckSendOverloads()
616 | {
617 | var messageOne = new BsonDocument { { "key1", 0 }, { "key2", true } };
618 | var messageThree = new BsonDocument { { "hi", "there" }, { "rawr", 2 } };
619 |
620 | queue.Send(messageOne);
621 | queue.Send(new BsonDocument("key", "value"));
622 |
623 | var resultOne = queue.Get(new QueryDocument(messageOne), TimeSpan.MaxValue, TimeSpan.Zero);
624 | Assert.AreEqual(2, collection.Count());
625 |
626 | queue.AckSend(resultOne.Handle, messageThree);
627 |
628 | var resultTwo = queue.Get(new QueryDocument(messageThree), TimeSpan.MaxValue, TimeSpan.Zero);
629 | Assert.AreEqual(messageThree, resultTwo.Payload);
630 | Assert.AreEqual(2, collection.Count());
631 |
632 | queue.AckSend(resultTwo.Handle, messageOne, DateTime.Now);
633 |
634 | var resultThree = queue.Get(new QueryDocument(messageOne), TimeSpan.MaxValue, TimeSpan.Zero);
635 | Assert.AreEqual(messageOne, resultThree.Payload);
636 | Assert.AreEqual(2, collection.Count());
637 |
638 | queue.AckSend(resultThree.Handle, messageThree, DateTime.Now, 0.0);
639 |
640 | var resultFour = queue.Get(new QueryDocument(messageThree), TimeSpan.MaxValue, TimeSpan.Zero);
641 | Assert.AreEqual(messageThree, resultFour.Payload);
642 | Assert.AreEqual(2, collection.Count());
643 |
644 | queue.AckSend(resultFour.Handle, messageOne, DateTime.Now, 0.0, true);
645 |
646 | var resultFive = queue.Get(new QueryDocument(messageOne), TimeSpan.MaxValue, TimeSpan.Zero);
647 | Assert.AreEqual(messageOne, resultFive.Payload);
648 | Assert.AreEqual(2, collection.Count());
649 | }
650 |
651 | [Test]
652 | [ExpectedException(typeof(ArgumentException))]
654 | public void AckSendWithNaNPriority()
654 | {
655 | queue.Send(new BsonDocument());
656 | var result = queue.Get(new QueryDocument(), TimeSpan.MaxValue);
657 | queue.AckSend(result.Handle, new BsonDocument("key", "value"), DateTime.Now, Double.NaN);
658 | }
659 |
660 | [Test]
661 | [ExpectedException(typeof(ArgumentNullException))]
662 | public void AckSendWithNullMessage()
663 | {
664 | queue.AckSend(null, new BsonDocument("key", "value"));
665 | }
666 |
667 | [Test]
668 | [ExpectedException(typeof(ArgumentNullException))]
669 | public void AckSendWithNullPayload()
670 | {
671 | queue.Send(new BsonDocument());
672 | var result = queue.Get(new QueryDocument(), TimeSpan.MaxValue);
673 | queue.AckSend(result.Handle, null);
674 | }
675 |
676 | [Test]
677 | public void AckSendWithNullStreams()
678 | {
679 | using (var streamOne = new MemoryStream())
680 | using (var streamTwo = new MemoryStream())
681 | {
682 | streamOne.WriteByte(11);
683 | streamTwo.WriteByte(22);
684 | streamOne.Position = 0;
685 | streamTwo.Position = 0;
686 | queue.Send(new BsonDocument(), DateTime.Now, 0.0, new Dictionary<string, Stream> { { "one", streamOne }, { "two", streamTwo } });
687 | }
688 | var resultOne = queue.Get(new QueryDocument(), TimeSpan.MaxValue);
689 |
690 | var messageTwo = new BsonDocument("key", "value");
691 | queue.AckSend(resultOne.Handle, messageTwo, DateTime.Now, 0.0, true, null);
692 |
693 | var resultTwo = queue.Get(new QueryDocument(messageTwo), TimeSpan.MaxValue);
694 | Assert.AreEqual(1, collection.Count());
695 | Assert.AreEqual(messageTwo, resultTwo.Payload);
696 |
697 | Assert.AreEqual(11, resultTwo.Streams["one"].ReadByte());
698 | Assert.AreEqual(22, resultTwo.Streams["two"].ReadByte());
699 |
700 | Assert.AreEqual(2, gridfs.Files.Count());
701 | Assert.AreEqual(2, gridfs.Chunks.Count());
702 | }
703 | #endregion
704 |
705 | #region Send
706 | [Test]
707 | public void Send()
708 | {
709 | var now = DateTime.Now;
710 |
711 | var payload = new BsonDocument { { "key1", 0 }, { "key2", true } };
712 |
713 | using (var streamOne = new MemoryStream())
714 | using (var streamTwo = new MemoryStream())
715 | {
716 | gridfs.Upload(streamOne, "one");//making sure the same file names are ok as long as their ids are different
717 |
718 | streamOne.WriteByte(111);
719 | streamTwo.WriteByte(222);
720 | streamOne.Position = 0;
721 | streamTwo.Position = 0;
722 | queue.Send(payload, now, 0.8, new Dictionary<string, Stream> { { "one", streamOne }, { "two", streamTwo } });
723 | }
724 |
725 | var expected = new BsonDocument
726 | {
727 | //_id added below
728 | { "payload", payload },
729 | { "running", false },
730 | { "resetTimestamp", new BsonDateTime(DateTime.MaxValue) },
731 | { "earliestGet", new BsonDateTime(now) },
732 | { "priority", 0.8 },
733 | //streams added below
734 | //created added below
735 | };
736 |
737 | var message = collection.FindOneAs<BsonDocument>();
738 |
739 | var actualCreated = message["created"];
740 | expected["created"] = actualCreated;
741 | actualCreated = actualCreated.ToUniversalTime();
742 |
743 | var actualStreamIds = message["streams"].AsBsonArray;
744 | expected["streams"] = actualStreamIds;
745 |
746 | Assert.IsTrue(actualCreated <= DateTime.UtcNow);
747 | Assert.IsTrue(actualCreated > DateTime.UtcNow - TimeSpan.FromSeconds(10));
748 |
749 | expected.InsertAt(0, new BsonElement("_id", message["_id"]));
750 | Assert.AreEqual(expected, message);
751 |
752 | var fileOne = gridfs.FindOneById(actualStreamIds[0]);
753 | Assert.AreEqual("one", fileOne.Name);
754 | using (var stream = fileOne.OpenRead())
755 | Assert.AreEqual(111, stream.ReadByte());
756 |
757 | var fileTwo = gridfs.FindOneById(actualStreamIds[1]);
758 | Assert.AreEqual("two", fileTwo.Name);
759 | using (var stream = fileTwo.OpenRead())
760 | Assert.AreEqual(222, stream.ReadByte());
761 | }
762 |
763 | [Test]
764 | [ExpectedException(typeof(ArgumentException))]
765 | public void SendWithNaNPriority()
766 | {
767 | queue.Send(new BsonDocument("key", "value"), DateTime.Now, Double.NaN);
768 | }
769 |
770 | [Test]
771 | [ExpectedException(typeof(ArgumentNullException))]
772 | public void SendWithNullMessage()
773 | {
774 | queue.Send(null);
775 | }
776 |
777 | [Test]
778 | [ExpectedException(typeof(ArgumentNullException))]
779 | public void SendWithNullStreams()
780 | {
781 | queue.Send(new BsonDocument("key", "value"), DateTime.Now, 0.0, null);
782 | }
783 | #endregion
784 |
785 | #region GetRandomDouble
786 | [Test]
787 | public void GetRandomDoubleFromZeroToOne()
788 | {
789 | var count = 1000;
790 | var sum = 0.0;
791 | for (var i = 0; i < count; ++i)
792 | {
793 | var randomDouble = Queue.GetRandomDouble(0.0, 1.0);
794 | sum += randomDouble;
795 | Assert.IsTrue(randomDouble <= 1.0);
796 | Assert.IsTrue(randomDouble >= 0.0);
797 | }
798 |
799 | var average = sum / (double)count;
800 |
801 | Assert.IsTrue(average >= 0.45);
802 | Assert.IsTrue(average <= 0.55);
803 | }
804 |
805 | [Test]
806 | public void GetRandomDoubleFromNegativeOneToPositiveOne()
807 | {
808 | var count = 1000;
809 | var sum = 0.0;
810 | for (var i = 0; i < count; ++i)
811 | {
812 | var randomDouble = Queue.GetRandomDouble(-1.0, 1.0);
813 | sum += randomDouble;
814 | Assert.IsTrue(randomDouble <= 1.0);
815 | Assert.IsTrue(randomDouble >= -1.0);
816 | }
817 |
818 | var average = sum / (double)count;
819 |
820 | Assert.IsTrue(average >= -0.05);
821 | Assert.IsTrue(average <= 0.05);
822 | }
823 |
824 | [Test]
825 | public void GetRandomDoubleFromThreeToFour()
826 | {
827 | var count = 1000;
828 | var sum = 0.0;
829 | for (var i = 0; i < count; ++i)
830 | {
831 | var randomDouble = Queue.GetRandomDouble(3.0, 4.0);
832 | sum += randomDouble;
833 | Assert.IsTrue(randomDouble <= 4.0);
834 | Assert.IsTrue(randomDouble >= 3.0);
835 | }
836 |
837 | var average = sum / (double)count;
838 |
839 | Assert.IsTrue(average >= 3.45);
840 | Assert.IsTrue(average <= 3.55);
841 | }
842 |
843 | [Test]
844 | [ExpectedException(typeof(ArgumentException))]
845 | public void GetRandomDoubleWithNaNMin()
846 | {
847 | Queue.GetRandomDouble(double.NaN, 4.0);
848 | }
849 |
850 | [Test]
851 | [ExpectedException(typeof(ArgumentException))]
852 | public void GetRandomDoubleWithNaNMax()
853 | {
854 | Queue.GetRandomDouble(4.0, double.NaN);
855 | }
856 |
857 | [Test]
858 | [ExpectedException(typeof(ArgumentException))]
859 | public void GetRandomDoubleWithMaxLessThanMin()
860 | {
861 | Queue.GetRandomDouble(4.0, 3.9);
862 | }
863 | #endregion
864 | }
865 | }
866 |
--------------------------------------------------------------------------------
/DominionEnterprises.Mongo.Tests/app.config:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
--------------------------------------------------------------------------------
/DominionEnterprises.Mongo/DominionEnterprises.Mongo.csproj:
--------------------------------------------------------------------------------
1 |
2 |
3 | Library
4 | DominionEnterprises.Mongo
5 | 4
6 | true
7 |
8 |
9 |
10 |
11 | ..\libs\MongoDB.Bson.dll
12 |
13 |
14 | ..\libs\MongoDB.Driver.dll
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
--------------------------------------------------------------------------------
/DominionEnterprises.Mongo/Queue.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Configuration;
3 | using System.Threading;
4 | using System.Linq;
5 | using MongoDB.Bson;
6 | using MongoDB.Driver;
7 | using System.Reflection;
8 | using System.Security.Cryptography;
9 | using System.Collections.Generic;
10 | using MongoDB.Driver.GridFS;
11 | using System.IO;
12 | using System.Runtime.CompilerServices;
13 |
14 | [assembly: AssemblyVersion("2.0.0.*"), InternalsVisibleTo("DominionEnterprises.Mongo.Tests")]
15 |
16 | namespace DominionEnterprises.Mongo
17 | {
18 | ///
19 | /// Abstraction of a MongoDB collection as a priority queue.
20 | ///
21 | ///
22 | /// Tied priorities are then ordered by time. So you may use a single priority for normal queuing (overloads exist for this purpose).
23 | /// Using a random priority achieves random Get()
24 | ///
25 | public sealed class Queue
26 | {
27 | internal const int ACK_MULTI_BATCH_SIZE = 1000;
28 |
29 | private readonly MongoCollection collection;
30 | private readonly MongoGridFS gridfs;
31 |
32 | ///
33 | /// Construct MongoQueue with url, db name and collection name from app settings keys mongoQueueUrl, mongoQueueDb and mongoQueueCollection
34 | ///
35 | public Queue()
36 | : this(ConfigurationManager.AppSettings["mongoQueueUrl"], ConfigurationManager.AppSettings["mongoQueueDb"], ConfigurationManager.AppSettings["mongoQueueCollection"])
37 | { }
38 |
39 | ///
40 | /// Construct MongoQueue
41 | ///
42 | /// mongo url like mongodb://localhost
43 | /// db name
44 | /// collection name
45 | /// url, db or collection is null
46 | public Queue(string url, string db, string collection)
47 | {
48 | if (url == null) throw new ArgumentNullException("url");
49 | if (db == null) throw new ArgumentNullException("db");
50 | if (collection == null) throw new ArgumentNullException("collection");
51 |
52 | this.collection = new MongoClient(url).GetServer().GetDatabase(db).GetCollection(collection);
53 | this.gridfs = this.collection.Database.GetGridFS(MongoGridFSSettings.Defaults);
54 | }
55 |
56 | ///
57 | /// Construct MongoQueue
58 | ///
59 | /// collection
60 | /// collection is null
61 | public Queue(MongoCollection collection)
62 | {
63 | if (collection == null) throw new ArgumentNullException("collection");
64 |
65 | this.collection = collection;
66 | this.gridfs = collection.Database.GetGridFS(MongoGridFSSettings.Defaults);
67 | }
68 |
69 | #region EnsureGetIndex
70 | ///
71 | /// Ensure index for Get() method with no fields before or after sort fields
72 | ///
73 | public void EnsureGetIndex()
74 | {
75 | EnsureGetIndex(new IndexKeysDocument());
76 | }
77 |
78 | ///
79 | /// Ensure index for Get() method with no fields after sort fields
80 | ///
81 | /// fields in Get() call that should be before the sort fields in the index
82 | /// beforeSort is null
83 | /// beforeSort field value is not 1 or -1
84 | public void EnsureGetIndex(IndexKeysDocument beforeSort)
85 | {
86 | EnsureGetIndex(beforeSort, new IndexKeysDocument());
87 | }
88 |
89 | ///
90 | /// Ensure index for Get() method
91 | ///
92 | /// fields in Get() call that should be before the sort fields in the index
93 | /// fields in Get() call that should be after the sort fields in the index
94 | /// beforeSort or afterSort is null
95 | /// beforeSort or afterSort field value is not 1 or -1
96 | public void EnsureGetIndex(IndexKeysDocument beforeSort, IndexKeysDocument afterSort)
97 | {
98 | if (beforeSort == null) throw new ArgumentNullException("beforeSort");
99 | if (afterSort == null) throw new ArgumentNullException("afterSort");
100 |
101 | //using general rule: equality, sort, range or more equality tests in that order for index
102 | var completeIndex = new IndexKeysDocument("running", 1);
103 |
104 | foreach (var field in beforeSort)
105 | {
106 | if (field.Value != 1 && field.Value != -1) throw new ArgumentException("field values must be 1 or -1 for ascending or descending", "beforeSort");
107 | completeIndex.Add("payload." + field.Name, field.Value);
108 | }
109 |
110 | completeIndex.Add("priority", 1);
111 | completeIndex.Add("created", 1);
112 |
113 | foreach (var field in afterSort)
114 | {
115 | if (field.Value != -1 && field.Value != 1) throw new ArgumentException("field values must be 1 or -1 for ascending or descending", "afterSort");
116 | completeIndex.Add("payload." + field.Name, field.Value);
117 | }
118 |
119 | completeIndex.Add("earliestGet", 1);
120 |
121 | EnsureIndex(completeIndex);//main query in Get()
122 | EnsureIndex(new IndexKeysDocument { { "running", 1 }, { "resetTimestamp", 1 } });//for the stuck messages query in Get()
123 | }
124 | #endregion
125 |
126 | ///
127 | /// Ensure index for Count() method
128 | /// Is a no-op if the generated index is a prefix of an existing one. If you have a similar EnsureGetIndex call, call it first.
129 | ///
130 | /// fields in Count() call
131 | /// whether running was given to Count() or not
132 | /// index was null
133 | /// index field value is not 1 or -1
134 | public void EnsureCountIndex(IndexKeysDocument index, bool includeRunning)
135 | {
136 | if (index == null) throw new ArgumentNullException("index");
137 |
138 | var completeFields = new IndexKeysDocument();
139 |
140 | if (includeRunning)
141 | completeFields.Add("running", 1);
142 |
143 | foreach (var field in index)
144 | {
145 | if (field.Value != 1 && field.Value != -1) throw new ArgumentException("field values must be 1 or -1 for ascending or descending", "index");
146 | completeFields.Add("payload." + field.Name, field.Value);
147 | }
148 |
149 | EnsureIndex(completeFields);
150 | }
151 |
152 | #region Get
153 | ///
154 | /// Get a non running message from queue with a wait of 3 seconds and poll of 200 milliseconds
155 | ///
156 | /// query where top level fields do not contain operators. Lower level fields can however. eg: valid {a: {$gt: 1}, "b.c": 3}, invalid {$and: [{...}, {...}]}
157 | /// duration before this message is considered abandoned and will be given with another call to Get()
158 | /// message or null
159 | /// query is null
160 | public Message Get(QueryDocument query, TimeSpan resetRunning)
161 | {
162 | return Get(query, resetRunning, TimeSpan.FromSeconds(3));
163 | }
164 |
165 | ///
166 | /// Get a non running message from queue with a poll of 200 milliseconds
167 | ///
168 | /// query where top level fields do not contain operators. Lower level fields can however. eg: valid {a: {$gt: 1}, "b.c": 3}, invalid {$and: [{...}, {...}]}
169 | /// duration before this message is considered abandoned and will be given with another call to Get()
170 | /// duration to keep polling before returning null
171 | /// message or null
172 | /// query is null
173 | public Message Get(QueryDocument query, TimeSpan resetRunning, TimeSpan wait)
174 | {
175 | return Get(query, resetRunning, wait, TimeSpan.FromMilliseconds(200));
176 | }
177 |
178 | ///
179 | /// Get a non running message from queue with an approximate wait.
180 | ///
181 | /// query where top level fields do not contain operators. Lower level fields can however. eg: valid {a: {$gt: 1}, "b.c": 3}, invalid {$and: [{...}, {...}]}
182 | /// duration before this message is considered abandoned and will be given with another call to Get()
183 | /// duration to keep polling before returning null
184 | /// duration between poll attempts
185 | /// message or null
186 | /// query is null
187 | public Message Get(QueryDocument query, TimeSpan resetRunning, TimeSpan wait, TimeSpan poll)
188 | {
189 | return Get(query, resetRunning, wait, poll, true);
190 | }
191 |
192 | ///
193 | /// Get a non running message from queue
194 | ///
195 | /// query where top level fields do not contain operators. Lower level fields can however. eg: valid {a: {$gt: 1}, "b.c": 3}, invalid {$and: [{...}, {...}]}
196 | /// duration before this message is considered abandoned and will be given with another call to Get()
197 | /// duration to keep polling before returning null
198 | /// duration between poll attempts
199 | /// whether to fluctuate the wait time randomly by +-10 percent. This ensures Get() calls separate in time when multiple Queues are used in loops started at the same time
200 | /// message or null
201 | /// query is null
202 | public Message Get(QueryDocument query, TimeSpan resetRunning, TimeSpan wait, TimeSpan poll, bool approximateWait)
203 | {
204 | if (query == null)
205 | throw new ArgumentNullException ("query");
206 |
207 | //reset stuck messages
208 | collection.Update(
209 | new QueryDocument { { "running", true }, { "resetTimestamp", new BsonDocument("$lte", DateTime.UtcNow) } },
210 | new UpdateDocument("$set", new BsonDocument("running", false)),
211 | UpdateFlags.Multi
212 | );
213 |
214 | var builtQuery = new QueryDocument("running", false);
215 | foreach (var field in query)
216 | builtQuery.Add("payload." + field.Name, field.Value);
217 |
218 | builtQuery.Add("earliestGet", new BsonDocument("$lte", DateTime.UtcNow));
219 |
220 | var resetTimestamp = DateTime.UtcNow;
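//adding resetRunning can overflow the DateTime range; the catch below clamps to Min/MaxValue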
221 | try
222 | {
223 | resetTimestamp += resetRunning;
224 | }
225 | catch (ArgumentOutOfRangeException)
226 | {
227 | resetTimestamp = resetRunning > TimeSpan.Zero ? DateTime.MaxValue : DateTime.MinValue;
228 | }
229 |
230 | var sort = new SortByDocument { { "priority", 1 }, { "created", 1 } };
231 | var update = new UpdateDocument("$set", new BsonDocument { { "running", true }, { "resetTimestamp", resetTimestamp } });
232 | var fields = new FieldsDocument { { "payload", 1 }, { "streams", 1 } };
233 |
234 | var end = DateTime.UtcNow;
235 | try
236 | {
237 | if (approximateWait)
238 | //fluctuate randomly by 10 percent
239 | wait += TimeSpan.FromMilliseconds(wait.TotalMilliseconds * GetRandomDouble(-0.1, 0.1));
240 |
241 | end += wait;
242 | }
243 | catch (Exception e)
244 | {
245 | if (!(e is OverflowException) && !(e is ArgumentOutOfRangeException))
246 | throw e;//can't cover
247 |
248 | end = wait > TimeSpan.Zero ? DateTime.MaxValue : DateTime.MinValue;
249 | }
250 |
251 | while (true)
252 | {
253 | var findModifyArgs = new FindAndModifyArgs { Query = builtQuery, SortBy = sort, Update = update, Fields = fields, Upsert = false };
254 |
255 | var message = collection.FindAndModify(findModifyArgs).ModifiedDocument;
256 | if (message != null)
257 | {
258 | var handleStreams = new List<KeyValuePair<BsonValue, Stream>>();
259 | var messageStreams = new Dictionary<string, Stream>();
260 | foreach (var streamId in message["streams"].AsBsonArray)
261 | {
262 | var fileInfo = gridfs.FindOneById(streamId);
263 |
264 | var stream = fileInfo.OpenRead();
265 |
266 | handleStreams.Add(new KeyValuePair<BsonValue, Stream>(streamId, stream));
267 | messageStreams.Add(fileInfo.Name, stream);
268 | }
269 |
270 | var handle = new Handle(message["_id"].AsObjectId, handleStreams);
271 | return new Message(handle, message["payload"].AsBsonDocument, messageStreams);
272 | }
273 |
274 | if (DateTime.UtcNow >= end)
275 | return null;
276 |
277 | try
278 | {
279 | Thread.Sleep(poll);
280 | }
281 | catch (ArgumentOutOfRangeException)
282 | {
283 | if (poll < TimeSpan.Zero)
284 | poll = TimeSpan.Zero;
285 | else
286 | poll = TimeSpan.FromMilliseconds(int.MaxValue);
287 |
288 | Thread.Sleep(poll);
289 | }
290 |
291 | if (DateTime.UtcNow >= end)
292 | return null;
293 | }
294 | }
295 | #endregion
296 |
297 | #region Count
298 | ///
299 | /// Count in queue, running true or false
300 | ///
301 | /// query where top level fields do not contain operators. Lower level fields can however. eg: valid {a: {$gt: 1}, "b.c": 3}, invalid {$and: [{...}, {...}]}
302 | /// count
303 | /// query is null
304 | public long Count(QueryDocument query)
305 | {
306 | if (query == null) throw new ArgumentNullException("query");
307 |
308 | var completeQuery = new QueryDocument();
309 |
310 | foreach (var field in query)
311 | completeQuery.Add("payload." + field.Name, field.Value);
312 |
313 | return collection.Count(completeQuery);
314 | }
315 |
316 | ///
317 | /// Count in queue
318 | ///
319 | /// query where top level fields do not contain operators. Lower level fields can however. eg: valid {a: {$gt: 1}, "b.c": 3}, invalid {$and: [{...}, {...}]}
320 | /// count running messages or not running
321 | /// count
322 | /// query is null
323 | public long Count(QueryDocument query, bool running)
324 | {
325 | if (query == null) throw new ArgumentNullException("query");
326 |
327 | var completeQuery = new QueryDocument("running", running);
328 | foreach (var field in query)
329 | completeQuery.Add("payload." + field.Name, field.Value);
330 |
331 | return collection.Count(completeQuery);
332 | }
333 | #endregion
334 |
335 | ///
336 | /// Acknowledge a handle was processed and remove from queue.
337 | ///
338 | /// handle received from Get()
339 | /// handle is null
340 | public void Ack(Handle handle)
341 | {
342 | if (handle == null) throw new ArgumentNullException("handle");
343 |
344 | collection.Remove(new QueryDocument("_id", handle.Id));
345 |
346 | foreach (var stream in handle.Streams)
347 | {
348 | stream.Value.Dispose();
349 | gridfs.DeleteById(stream.Key);
350 | }
351 | }
352 |
353 | ///
354 | /// Acknowledge multiple handles were processed and remove from queue.
355 | ///
356 | /// handles received from Get()
357 | /// handles is null
358 | public void AckMulti(IEnumerable<Handle> handles)
359 | {
360 | if (handles == null) throw new ArgumentNullException("handles");
361 |
362 | var ids = new BsonArray();
363 | foreach (var handle in handles)
364 | {
365 | ids.Add(handle.Id);
366 |
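//remove in batches: once ACK_MULTI_BATCH_SIZE ids have accumulated, issue one $in remove and start a new batch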
367 | if (ids.Count != ACK_MULTI_BATCH_SIZE)
368 | continue;
369 |
370 | collection.Remove(new QueryDocument("_id", new BsonDocument("$in", ids)));
371 | ids.Clear();
372 | }
373 |
374 | if (ids.Count > 0)
375 | collection.Remove(new QueryDocument("_id", new BsonDocument("$in", ids)));
376 |
377 | foreach (var handle in handles)
378 | {
379 | foreach (var stream in handle.Streams)
380 | {
381 | stream.Value.Dispose();
382 | gridfs.DeleteById(stream.Key);
383 | }
384 | }
385 | }
386 |
387 | #region AckSend
388 | ///
389 | /// Ack handle and send payload to queue, atomically, with earliestGet as Now, 0.0 priority, new timestamp and no gridfs streams
390 | ///
391 | /// handle to ack received from Get()
392 | /// payload to send
393 | /// handle or payload is null
394 | public void AckSend(Handle handle, BsonDocument payload)
395 | {
396 | AckSend(handle, payload, DateTime.UtcNow);
397 | }
398 |
399 | ///
400 | /// Ack handle and send payload to queue, atomically, with 0.0 priority, new timestamp and no gridfs streams
401 | ///
402 | /// handle to ack received from Get()
403 | /// payload to send
404 | /// earliest instant that a call to Get() can return message
405 | /// handle or payload is null
406 | public void AckSend(Handle handle, BsonDocument payload, DateTime earliestGet)
407 | {
408 | AckSend(handle, payload, earliestGet, 0.0);
409 | }
410 |
411 | ///
412 | /// Ack handle and send payload to queue, atomically, with new timestamp and no gridfs streams
413 | ///
414 | /// handle to ack received from Get()
415 | /// payload to send
416 | /// earliest instant that a call to Get() can return message
417 | /// priority for order out of Get(). 0 is higher priority than 1
418 | /// handle or payload is null
419 | /// priority was NaN
420 | public void AckSend(Handle handle, BsonDocument payload, DateTime earliestGet, double priority)
421 | {
422 | AckSend(handle, payload, earliestGet, priority, true);
423 | }
424 |
425 | ///
426 | /// Ack handle and send payload to queue, atomically, with no gridfs streams
427 | ///
428 | /// handle to ack received from Get()
429 | /// payload to send
430 | /// earliest instant that a call to Get() can return message
431 | /// priority for order out of Get(). 0 is higher priority than 1
432 | /// true to give the payload a new timestamp or false to use given message timestamp
433 | /// handle or payload is null
434 | /// priority was NaN
435 | public void AckSend(Handle handle, BsonDocument payload, DateTime earliestGet, double priority, bool newTimestamp)
436 | {
437 | AckSend(handle, payload, earliestGet, priority, newTimestamp, new KeyValuePair<string, Stream>[0]);
438 | }
439 |
440 | ///
441 | /// Ack handle and send payload to queue, atomically.
442 | ///
443 | /// handle to ack received from Get()
444 | /// payload to send
445 | /// earliest instant that a call to Get() can return message
446 | /// priority for order out of Get(). 0 is higher priority than 1
447 | /// true to give the payload a new timestamp or false to use given message timestamp
448 | /// streams to upload into gridfs or null to forward handle's streams
449 | /// handle or payload is null
450 | /// priority was NaN
451 | public void AckSend(Handle handle, BsonDocument payload, DateTime earliestGet, double priority, bool newTimestamp, IEnumerable<KeyValuePair<string, Stream>> streams)
452 | {
453 | if (handle == null) throw new ArgumentNullException("handle");
454 | if (payload == null) throw new ArgumentNullException("payload");
455 | if (Double.IsNaN(priority)) throw new ArgumentException("priority was NaN", "priority");
456 |
457 | var toSet = new BsonDocument
458 | {
459 | {"payload", payload},
460 | {"running", false},
461 | {"resetTimestamp", DateTime.MaxValue},
462 | {"earliestGet", earliestGet},
463 | {"priority", priority},
464 | };
465 | if (newTimestamp)
466 | toSet["created"] = DateTime.UtcNow;
467 |
468 | if (streams != null)
469 | {
470 | var streamIds = new BsonArray();
471 | foreach (var stream in streams)
472 | streamIds.Add(gridfs.Upload(stream.Value, stream.Key).Id);
473 |
474 | toSet["streams"] = streamIds;
475 | }
476 |
477 | //using upsert because if no document is found then the doc was removed (SHOULD ONLY HAPPEN BY SOMEONE MANUALLY), so we can just send
478 | collection.Update(new QueryDocument("_id", handle.Id), new UpdateDocument("$set", toSet), UpdateFlags.Upsert);
479 |
480 | foreach (var existingStream in handle.Streams)
481 | existingStream.Value.Dispose();
482 |
483 | if (streams != null)
484 | {
485 | foreach (var existingStream in handle.Streams)
486 | gridfs.DeleteById(existingStream.Key);
487 | }
488 | }
489 | #endregion
490 |
491 | #region Send
492 | ///
493 | /// Send message to queue with earliestGet as Now, 0.0 priority and no gridfs streams
494 | ///
495 | /// payload
496 | /// payload is null
497 | public void Send(BsonDocument payload)
498 | {
499 | Send(payload, DateTime.UtcNow, 0.0);
500 | }
501 |
502 | ///
503 | /// Send message to queue with 0.0 priority and no gridfs streams
504 | ///
505 | /// payload
506 | /// earliest instant that a call to Get() can return message
507 | /// payload is null
508 | public void Send(BsonDocument payload, DateTime earliestGet)
509 | {
510 | Send(payload, earliestGet, 0.0);
511 | }
512 |
513 | ///
514 | /// Send message to queue with no gridfs streams
515 | ///
516 | /// payload
517 | /// earliest instant that a call to Get() can return message
518 | /// priority for order out of Get(). 0 is higher priority than 1
519 | /// payload is null
520 | /// priority was NaN
521 | public void Send(BsonDocument payload, DateTime earliestGet, double priority)
522 | {
523 | Send(payload, earliestGet, priority, new List<KeyValuePair<string, Stream>>());
524 | }
525 |
526 | ///
527 | /// Send message to queue.
528 | ///
529 | /// payload
530 | /// earliest instant that a call to Get() can return message
531 | /// priority for order out of Get(). 0 is higher priority than 1
532 | /// streams to upload into gridfs
533 | /// payload is null
534 | /// priority was NaN
535 | /// streams is null
536 | public void Send(BsonDocument payload, DateTime earliestGet, double priority, IEnumerable<KeyValuePair<string, Stream>> streams)
537 | {
538 | if (payload == null) throw new ArgumentNullException("payload");
539 | if (Double.IsNaN(priority)) throw new ArgumentException("priority was NaN", "priority");
540 | if (streams == null) throw new ArgumentNullException("streams");
541 |
542 | var streamIds = new BsonArray();
543 | foreach (var stream in streams)
544 | streamIds.Add(gridfs.Upload(stream.Value, stream.Key).Id);
545 |
546 | var message = new BsonDocument
547 | {
548 | {"payload", payload},
549 | {"running", false},
550 | {"resetTimestamp", DateTime.MaxValue},
551 | {"earliestGet", earliestGet},
552 | {"priority", priority},
553 | {"created", DateTime.UtcNow},
554 | {"streams", streamIds},
555 | };
556 |
557 | collection.Insert(message);
558 | }
559 | #endregion
560 |
561 | private void EnsureIndex(IndexKeysDocument index)
562 | {
563 | //if index is a prefix of any existing index we are good
564 | foreach (var existingIndex in collection.GetIndexes())
565 | {
566 | var names = index.Names;
567 | var values = index.Values;
568 | var existingNamesPrefix = existingIndex.Key.Names.Take(names.Count());
569 | var existingValuesPrefix = existingIndex.Key.Values.Take(values.Count());
570 |
571 | if (Enumerable.SequenceEqual(names, existingNamesPrefix) && Enumerable.SequenceEqual(values, existingValuesPrefix))
572 | return;
573 | }
574 |
575 | for (var i = 0; i < 5; ++i)
576 | {
577 | for (var name = Guid.NewGuid().ToString(); name.Length > 0; name = name.Substring(0, name.Length - 1))
578 | {
579 | //creating an index with the same name and different spec does nothing.
580 | //creating an index with same spec and different name does nothing.
581 | //so we use any generated name, and then find the right spec after we have called, and just go with that name.
582 |
583 | try
584 | {
585 | collection.CreateIndex(index, new IndexOptionsDocument { {"name", name }, { "background", true } });
586 | }
587 | catch (MongoCommandException)
588 | {
589 | //this happens when the name was too long
590 | }
591 |
592 | foreach (var existingIndex in collection.GetIndexes())
593 | {
594 | if (existingIndex.Key == index)
595 | return;
596 | }
597 | }
598 | }
599 |
600 | throw new Exception("couldnt create index after 5 attempts");
601 | }
602 |
603 | ///
604 | /// Gets a random double between min and max using RNGCryptoServiceProvider
605 | ///
606 | ///
607 | /// random double.
608 | ///
609 | internal static double GetRandomDouble(double min, double max)
610 | {
611 | if (Double.IsNaN(min)) throw new ArgumentException("min cannot be NaN");
612 | if (Double.IsNaN(max)) throw new ArgumentException("max cannot be NaN");
613 | if (max < min) throw new ArgumentException("max cannot be less than min");
614 |
615 | var buffer = new byte[8];
616 | new RNGCryptoServiceProvider().GetBytes(buffer);
617 | var randomULong = BitConverter.ToUInt64(buffer, 0);
618 |
619 | var fraction = (double)randomULong / (double)ulong.MaxValue;
620 | var fractionOfNewRange = fraction * (max - min);
621 | return min + fractionOfNewRange;
622 | }
623 | }
624 |
625 | ///
626 | /// Message to be given out of Get()
627 | ///
628 | public sealed class Message
629 | {
630 | public readonly Handle Handle;
631 | public readonly BsonDocument Payload;
632 | public readonly IDictionary<string, Stream> Streams;
633 |
634 | ///
635 | /// Construct Message
636 | ///
637 | /// handle
638 | /// payload
639 | /// streams
640 | internal Message(Handle handle, BsonDocument payload, IDictionary<string, Stream> streams)
641 | {
642 | this.Handle = handle;
643 | this.Payload = payload;
644 | this.Streams = streams;
645 | }
646 | }
647 |
648 | ///
649 | /// Message handle to be given to Ack() and AckSend().
650 | ///
651 | public sealed class Handle
652 | {
653 | internal readonly BsonObjectId Id;
654 | internal readonly IEnumerable<KeyValuePair<BsonValue, Stream>> Streams;
655 |
656 | ///
657 | /// Construct Handle
658 | ///
659 | /// id
660 | /// streams
661 | internal Handle(BsonObjectId id, IEnumerable<KeyValuePair<BsonValue, Stream>> streams)
662 | {
663 | this.Id = id;
664 | this.Streams = streams;
665 | }
666 | }
667 | }
668 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | The MIT License
2 |
3 | Copyright (c) 2013 Dominion Enterprises
4 |
5 | Permission is hereby granted, free of charge, to any person
6 | obtaining a copy of this software and associated documentation
7 | files (the "Software"), to deal in the Software without
8 | restriction, including without limitation the rights to use,
9 | copy, modify, merge, publish, distribute, sublicense, and/or sell
10 | copies of the Software, and to permit persons to whom the
11 | Software is furnished to do so, subject to the following
12 | conditions:
13 |
14 | The above copyright notice and this permission notice shall be
15 | included in all copies or substantial portions of the Software.
16 |
17 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
19 | OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
20 | NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
21 | HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
22 | WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
23 | FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
24 | OTHER DEALINGS IN THE SOFTWARE.
25 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # mongo-queue-csharp
2 | [Build Status](https://travis-ci.org/dominionenterprises/mongo-queue-csharp)
3 |
4 | A C# message queue that uses MongoDB as its backend.
5 | It adheres to the 1.0.0 [specification](https://github.com/dominionenterprises/mongo-queue-specification).
6 |
7 | ## Features
8 |
9 | * Message selection and/or count via MongoDB query
10 | * Distributes across machines via MongoDB
11 | * Multi-language support through the [specification](https://github.com/dominionenterprises/mongo-queue-specification)
12 | * Message priority
13 | * Delayed messages
14 | * Running message timeout and redelivery
15 | * Atomic acknowledge and send together (see the sketch after this list)
16 | * Easy index creation based only on payload
17 |
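A quick sketch of the priority, delay, and atomic ack-and-send features follows. The optional `earliestGet`/`priority` arguments to `Send` and the exact `AckSend` overload shown here are assumptions based on the [specification](https://github.com/dominionenterprises/mongo-queue-specification); see [Queue.cs](DominionEnterprises.Mongo/Queue.cs) for the exact signatures.

```csharp
using System;
using MongoDB.Bson;
using MongoDB.Driver;
using DominionEnterprises.Mongo;

var queue = new Queue("mongodb://localhost", "queues", "queue");

// Delay delivery by an hour and set a priority (both assumed optional parameters of Send).
queue.Send(new BsonDocument("type", "report"), DateTime.UtcNow.AddHours(1), 0.5);

// Select messages by payload; redeliver after one minute if not acknowledged.
var message = queue.Get(new QueryDocument("type", "report"), TimeSpan.FromMinutes(1));

// Acknowledge the received message and enqueue a follow-up in one atomic operation.
queue.AckSend(message.Handle, new BsonDocument("type", "followup"));
```
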
18 | ## Simplest use
19 |
20 | ```csharp
21 | using System;
22 | using MongoDB.Bson;
23 | using MongoDB.Driver;
24 | using DominionEnterprises.Mongo;
25 |
26 | var queue = new Queue("mongodb://localhost", "queues", "queue");
27 | queue.Send(new BsonDocument());
28 | var message = queue.Get(new QueryDocument(), TimeSpan.FromMinutes(1));
29 | queue.Ack(message);
30 | ```
31 |
32 | ## Build
33 |
34 | For Linux, make sure [Mono](https://github.com/mono/mono), which comes with [xbuild](http://www.mono-project.com/Microsoft.Build), is installed.
35 | For Windows, make sure the [.NET SDK](http://www.microsoft.com/en-us/download/details.aspx?id=8279), which comes with
36 | [MSBuild](http://msdn.microsoft.com/en-us/library/dd393574.aspx), is installed.
37 | Make sure you are in the repository root.
38 |
39 | For Linux, run:
40 |
41 | ```bash
42 | xbuild DominionEnterprises.Mongo/DominionEnterprises.Mongo.csproj
43 | ```
44 |
45 | For Windows, run:
46 |
47 | ```dos
48 | C:\Windows\Microsoft.NET\Framework64\v4.0.30319\MSBuild.exe DominionEnterprises.Mongo\DominionEnterprises.Mongo.csproj
49 | ```
50 |
51 | Then use the resulting `DominionEnterprises.Mongo/bin/Debug/DominionEnterprises.Mongo.dll` as a reference in your project.
52 |
53 | ## Documentation
54 |
55 | The documentation lives in the [source](DominionEnterprises.Mongo/Queue.cs) itself; take a look!
56 |
57 | ## Contact
58 |
59 | Developers may be contacted via:
60 |
61 | * [Pull Requests](https://github.com/dominionenterprises/mongo-queue-csharp/pulls)
62 | * [Issues](https://github.com/dominionenterprises/mongo-queue-csharp/issues)
63 |
64 | ## Project build
65 |
66 | Install and start [MongoDB](http://www.mongodb.org).
67 | For Linux, make sure [Mono](https://github.com/mono/mono), which comes with [xbuild](http://www.mono-project.com/Microsoft.Build), is installed.
68 | For Windows, make sure the [.NET SDK](http://www.microsoft.com/en-us/download/details.aspx?id=8279), which comes with
69 | [MSBuild](http://msdn.microsoft.com/en-us/library/dd393574.aspx), is installed.
70 | For both, make sure [nunit-console](http://www.nunit.org/index.php?p=nunit-console&r=2.2.10) is installed.
71 | Make sure you are in the repository root.
72 |
73 | For Linux, run:
74 |
75 | ```bash
76 | sh build.sh
77 | ```
78 |
79 | For Windows, run:
80 |
81 | ```dos
82 | build.bat
83 | ```
84 |
--------------------------------------------------------------------------------
/build.bat:
--------------------------------------------------------------------------------
1 | C:\Windows\Microsoft.NET\Framework64\v4.0.30319\MSBuild.exe DominionEnterprises.Mongo.Tests\DominionEnterprises.Mongo.Tests.csproj
2 | if %errorlevel% neq 0 exit /B 1
3 | tools.SharpCover\SharpCover.exe instrument travisCoverageConfig.json
4 | if %errorlevel% neq 0 exit /B 1
5 | "C:\Program Files (x86)\NUnit 2.6.2\bin\nunit-console.exe" DominionEnterprises.Mongo.Tests\bin\Debug\DominionEnterprises.Mongo.Tests.dll
6 | if %errorlevel% neq 0 exit /B 1
7 | tools.SharpCover\SharpCover.exe check
8 |
--------------------------------------------------------------------------------
/build.sh:
--------------------------------------------------------------------------------
1 | xbuild DominionEnterprises.Mongo.Tests/DominionEnterprises.Mongo.Tests.csproj \
2 | && mono tools.SharpCover/SharpCover.exe instrument travisCoverageConfig.json \
3 | && nunit-console DominionEnterprises.Mongo.Tests/bin/Debug/DominionEnterprises.Mongo.Tests.dll \
4 | && mono tools.SharpCover/SharpCover.exe check
5 |
--------------------------------------------------------------------------------
/libs/MongoDB.Bson.dll:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/traderinteractive/mongo-queue-csharp/91c3f417b55a5fc4a3c0fa0ac5ab92127083f70e/libs/MongoDB.Bson.dll
--------------------------------------------------------------------------------
/libs/MongoDB.Driver.dll:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/traderinteractive/mongo-queue-csharp/91c3f417b55a5fc4a3c0fa0ac5ab92127083f70e/libs/MongoDB.Driver.dll
--------------------------------------------------------------------------------
/libs/nunit.framework.dll:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/traderinteractive/mongo-queue-csharp/91c3f417b55a5fc4a3c0fa0ac5ab92127083f70e/libs/nunit.framework.dll
--------------------------------------------------------------------------------
/tools.SharpCover/Counter.dll:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/traderinteractive/mongo-queue-csharp/91c3f417b55a5fc4a3c0fa0ac5ab92127083f70e/tools.SharpCover/Counter.dll
--------------------------------------------------------------------------------
/tools.SharpCover/Mono.Cecil.Mdb.dll:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/traderinteractive/mongo-queue-csharp/91c3f417b55a5fc4a3c0fa0ac5ab92127083f70e/tools.SharpCover/Mono.Cecil.Mdb.dll
--------------------------------------------------------------------------------
/tools.SharpCover/Mono.Cecil.Pdb.dll:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/traderinteractive/mongo-queue-csharp/91c3f417b55a5fc4a3c0fa0ac5ab92127083f70e/tools.SharpCover/Mono.Cecil.Pdb.dll
--------------------------------------------------------------------------------
/tools.SharpCover/Mono.Cecil.Rocks.dll:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/traderinteractive/mongo-queue-csharp/91c3f417b55a5fc4a3c0fa0ac5ab92127083f70e/tools.SharpCover/Mono.Cecil.Rocks.dll
--------------------------------------------------------------------------------
/tools.SharpCover/Mono.Cecil.dll:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/traderinteractive/mongo-queue-csharp/91c3f417b55a5fc4a3c0fa0ac5ab92127083f70e/tools.SharpCover/Mono.Cecil.dll
--------------------------------------------------------------------------------
/tools.SharpCover/Newtonsoft.Json.dll:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/traderinteractive/mongo-queue-csharp/91c3f417b55a5fc4a3c0fa0ac5ab92127083f70e/tools.SharpCover/Newtonsoft.Json.dll
--------------------------------------------------------------------------------
/tools.SharpCover/SharpCover.exe:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/traderinteractive/mongo-queue-csharp/91c3f417b55a5fc4a3c0fa0ac5ab92127083f70e/tools.SharpCover/SharpCover.exe
--------------------------------------------------------------------------------
/travisCoverageConfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "assemblies": [
3 | "DominionEnterprises.Mongo.Tests/bin/Debug/DominionEnterprises.Mongo.dll"
4 | ],
5 | "methodBodyExcludes": [
6 | {
7 | "method": "DominionEnterprises.Mongo.Message DominionEnterprises.Mongo.Queue::Get(MongoDB.Driver.QueryDocument,System.TimeSpan,System.TimeSpan,System.TimeSpan,System.Boolean)",
8 | "lines": ["poll = TimeSpan.FromMilliseconds(int.MaxValue);", "throw e;//cant cover"]
9 | }
10 | ]
11 | }
12 |
--------------------------------------------------------------------------------