/*
 * The contents of this file are subject to the Initial
 * Developer's Public License Version 1.0 (the "License");
 * you may not use this file except in compliance with the
 * License. You may obtain a copy of the License at
 * http://www.ibphoenix.com/main.nfs?a=ibphoenix&page=ibp_idpl.
 *
 * Software distributed under the License is distributed AS IS,
 * WITHOUT WARRANTY OF ANY KIND, either express or implied.
 * See the License for the specific language governing rights
 * and limitations under the License.
 *
 * The Original Code was created by Dmitry Yemanov
 * for the Firebird Open Source RDBMS project.
 *
 * Copyright (c) 2006 Dmitry Yemanov <dimitr@users.sf.net>
 * and all contributors signed below.
 *
 * All Rights Reserved.
 * Contributor(s): ______________________________________.
 */

#include "firebird.h"

#include "iberror.h"
#include "../common/config/config.h"
#include "../common/config/dir_list.h"
#include "../common/gdsassert.h"
#include "../yvalve/gds_proto.h"
#include "../jrd/err_proto.h"
#include "../common/isc_proto.h"
#include "../common/os/path_utils.h"

#include "../jrd/TempSpace.h"

using Firebird::TempFile;

// Static definitions/initializations

const size_t MIN_TEMP_BLOCK_SIZE = 64 * 1024;

Firebird::GlobalPtr<Firebird::Mutex> TempSpace::initMutex;
Firebird::TempDirectoryList* TempSpace::tempDirs = NULL;
size_t TempSpace::minBlockSize = 0;
offset_t TempSpace::globalCacheUsage = 0;

//
// In-memory block class
//

size_t TempSpace::MemoryBlock::read(offset_t offset, void* buffer, size_t length)
{
	if (offset + length > size)
	{
		length = size - offset;
	}
	memcpy(buffer, ptr + offset, length);
	return length;
}

size_t TempSpace::MemoryBlock::write(offset_t offset, const void* buffer, size_t length)
{
	if (offset + length > size)
	{
		length = size - offset;
	}
	memcpy(ptr + offset, buffer, length);
	return length;
}

//
// On-disk block class
//

size_t TempSpace::FileBlock::read(offset_t offset, void* buffer, size_t length)
{
	if (offset + length > size)
	{
		length = size - offset;
	}
	offset += seek;
	return file->read(offset, buffer, length);
}

size_t TempSpace::FileBlock::write(offset_t offset, const void* buffer, size_t length)
{
	if (offset + length > size)
	{
		length = size - offset;
	}
	offset += seek;
	return file->write(offset, buffer, length);
}
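
// Note on the block-level read()/write() above: a request that runs past the
// end of a block is truncated at the block boundary, and the remainder is
// expected to be handled by the next block in the chain (see TempSpace::read
// and TempSpace::write below). A minimal sketch with hypothetical values:
//
//   // block->size == 64 KB, request starts 10 bytes before the block end
//   size_t done = block->read(block->size - 10, buf, 100);   // done == 10
//   // the remaining 90 bytes must be read from block->next at offset 0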

//
// TempSpace::TempSpace
//
// Constructor
//

TempSpace::TempSpace(MemoryPool& p, const Firebird::PathName& prefix, bool dynamic)
	: pool(p), filePrefix(p, prefix),
	  logicalSize(0), physicalSize(0), localCacheUsage(0),
	  head(NULL), tail(NULL), tempFiles(p),
	  initialBuffer(p), initiallyDynamic(dynamic),
	  freeSegments(p)
{
	if (!tempDirs)
	{
		Firebird::MutexLockGuard guard(initMutex, FB_FUNCTION);
		if (!tempDirs)
		{
			MemoryPool& def_pool = *getDefaultMemoryPool();
			tempDirs = FB_NEW(def_pool) Firebird::TempDirectoryList(def_pool);
			minBlockSize = Config::getTempBlockSize();

			if (minBlockSize < MIN_TEMP_BLOCK_SIZE)
				minBlockSize = MIN_TEMP_BLOCK_SIZE;
			else
				minBlockSize = FB_ALIGN(minBlockSize, MIN_TEMP_BLOCK_SIZE);
		}
	}
}
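
// Illustration of the one-time initialization above (numbers are examples only,
// and assume FB_ALIGN(n, m) rounds n up to a multiple of m): a configured
// TempBlockSize below 64 KB is clamped to MIN_TEMP_BLOCK_SIZE, while larger
// values are aligned up to a 64 KB multiple.
//
//   // Config::getTempBlockSize() == 1'000      -> minBlockSize == 65'536
//   // Config::getTempBlockSize() == 1'000'000  -> minBlockSize == 1'048'576 (16 * 64 KB)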

//
// TempSpace::~TempSpace
//
// Destructor
//

TempSpace::~TempSpace()
{
	while (head)
	{
		Block* temp = head->next;
		delete head;
		head = temp;
	}

	globalCacheUsage -= localCacheUsage;

	while (tempFiles.getCount())
	{
		delete tempFiles.pop();
	}
}

//
// TempSpace::read
//
// Reads bytes from the temporary space
//

size_t TempSpace::read(offset_t offset, void* buffer, size_t length)
{
	fb_assert(offset + length <= logicalSize);

	if (length)
	{
		// search for the first needed block
		Block* block = findBlock(offset);

		UCHAR* p = static_cast<UCHAR*>(buffer);
		size_t l = length;

		// read data from the block chain
		for (Block* itr = block; itr && l; itr = itr->next, offset = 0)
		{
			const size_t n = itr->read(offset, p, l);
			p += n;
			l -= n;
		}

		fb_assert(!l);
	}

	return length;
}

//
// TempSpace::write
//
// Writes bytes to the temporary space
//

size_t TempSpace::write(offset_t offset, const void* buffer, size_t length)
{
	fb_assert(offset <= logicalSize);

	if (offset + length > logicalSize)
	{
		// not enough space, allocate one more block
		extend(offset + length - logicalSize);
	}

	if (length)
	{
		// search for the first needed block
		Block* const block = findBlock(offset);

		const UCHAR* p = static_cast<const UCHAR*>(buffer);
		size_t l = length;

		// write data to as many blocks as necessary
		for (Block* itr = block; itr && l; itr = itr->next, offset = 0)
		{
			const size_t n = itr->write(offset, p, l);
			p += n;
			l -= n;
		}

		fb_assert(!l);
	}

	return length;
}
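
// A minimal usage sketch for read()/write() (illustrative only; the instance,
// the "fb_tmp_" prefix and the buffer are assumptions of the example, not taken
// from this file). Note that a write may start at most at the current logical
// size; writing past the end grows the space automatically.
//
//   TempSpace space(pool, "fb_tmp_", false);
//   UCHAR buf[4096] = {0};
//   space.write(0, buf, sizeof(buf));      // extends the space to 4096 bytes
//   space.write(4096, buf, sizeof(buf));   // appends; the space grows to 8192 bytes
//   space.read(0, buf, sizeof(buf));       // reads the first 4 KB back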

//
// TempSpace::extend
//
// Increases size of the temporary space
//

void TempSpace::extend(size_t size)
{
	logicalSize += size;

	if (logicalSize > physicalSize)
	{
		const size_t initialSize = initialBuffer.getCount();

		// If the dynamic mode is specified, then we allocate new blocks
		// by growing the same initial memory block in the specified chunks.
		// Once the limit (64KB) is reached, we switch to the generic algorithm
		// (1MB blocks), copy the existing data there and free the initial buffer.
		//
		// This mode should not be used if the caller never works with small blocks.
		// Also, it MUST NOT be used if the caller deals with inMemory() or allocateBatch()
		// routines and caches the pointers to use them later. These pointers may become
		// invalid after resizing the initial block or after switching to large blocks.

		if (initiallyDynamic && logicalSize < MIN_TEMP_BLOCK_SIZE)
		{
			// allocate or extend the initial dynamic block; it will grow up to 64KB
			if (!initialSize)
			{
				fb_assert(!head && !tail);
				head = tail = FB_NEW(pool) InitialBlock(initialBuffer.getBuffer(size), size);
			}
			else
			{
				fb_assert(head == tail);
				size += initialSize;
				initialBuffer.resize(size);
				new (head) InitialBlock(initialBuffer.begin(), size);
			}

			physicalSize = size;
			return;
		}

		if (initialSize)
		{
			fb_assert(head == tail);
			delete head;
			head = tail = NULL;
			size = FB_ALIGN(logicalSize, minBlockSize);
			physicalSize = size;
		}
		else
		{
			size = FB_ALIGN(logicalSize - physicalSize, minBlockSize);
			physicalSize += size;
		}

		Block* block = NULL;

		if (globalCacheUsage + size <= size_t(Config::getTempCacheLimit()))
		{
			try
			{
				// allocate block in virtual memory
				block = FB_NEW(pool) MemoryBlock(FB_NEW(pool) UCHAR[size], tail, size);
				localCacheUsage += size;
				globalCacheUsage += size;
			}
			catch (const Firebird::BadAlloc&)
			{
				// not enough memory
			}
		}

		if (!block)
		{
			// allocate block in the temp file
			TempFile* const file = setupFile(size);
			fb_assert(file);
			if (tail && tail->sameFile(file))
			{
				fb_assert(!initialSize);
				tail->size += size;
				return;
			}
			block = FB_NEW(pool) FileBlock(file, tail, size);
		}

		// preserve the initial contents, if any
		if (initialSize)
		{
			block->write(0, initialBuffer.begin(), initialSize);
			initialBuffer.free();
		}

		// append new block to the chain
		if (!head)
		{
			head = block;
		}
		tail = block;
	}
}
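
// Illustration of the dynamic mode handled above (hedged sketch; the instance,
// the buffers and the sizes are examples, not taken from this file): with the
// 'dynamic' flag set, successive small writes keep resizing the single initial
// buffer until 64 KB is reached, after which the contents are copied into a
// regular minBlockSize block and the initial buffer is freed.
//
//   TempSpace space(pool, "fb_tmp_", true);   // hypothetical dynamic instance
//   space.write(0, small, 1024);       // initial buffer grows to 1 KB
//   space.write(1024, small, 1024);    // same buffer resized to 2 KB (cached pointers may move)
//   space.write(2048, large, 65536);   // 64 KB limit exceeded: data moves to a regular block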

//
// TempSpace::findBlock
//
// Locates the space block corresponding to the given global offset
//

TempSpace::Block* TempSpace::findBlock(offset_t& offset) const
{
	fb_assert(offset <= logicalSize);

	Block* block = NULL;

	if (offset < physicalSize / 2)
	{
		// walk forward
		block = head;
		while (block && offset >= block->size)
		{
			offset -= block->size;
			block = block->next;
		}
		fb_assert(block);
	}
	else
	{
		// walk backward
		block = tail;
		while (block && physicalSize - offset > block->size)
		{
			offset += block->size;
			block = block->prev;
		}
		fb_assert(block);
		offset -= physicalSize - block->size;
	}

	fb_assert(offset <= block->size);
	return block;
}
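
// Worked example for findBlock() (numbers are illustrative): with two blocks of
// 65,536 and 1,048,576 bytes (physicalSize == 1,114,112), a global offset of
// 70,000 is below physicalSize / 2, so the forward walk applies: 70,000 >= 65,536,
// hence the offset becomes 70,000 - 65,536 == 4,464 and the second block is
// returned together with that block-local offset.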

//
// TempSpace::setupFile
//
// Allocates the required space in some temporary file
//

TempFile* TempSpace::setupFile(size_t size)
{
	ISC_STATUS_ARRAY status_vector = {0};

	for (size_t i = 0; i < tempDirs->getCount(); i++)
	{
		TempFile* file = NULL;

		Firebird::PathName directory = (*tempDirs)[i];
		PathUtils::ensureSeparator(directory);

		for (size_t j = 0; j < tempFiles.getCount(); j++)
		{
			Firebird::PathName dirname, filename;
			PathUtils::splitLastComponent(dirname, filename, tempFiles[j]->getName());
			PathUtils::ensureSeparator(dirname);
			if (!directory.compare(dirname))
			{
				file = tempFiles[j];
				break;
			}
		}

		try
		{
			if (!file)
			{
				file = FB_NEW(pool) TempFile(pool, filePrefix, directory);
				tempFiles.add(file);
			}

			file->extend(size);
		}
		catch (const Firebird::system_error& ex)
		{
			ex.stuff_exception(status_vector);
			continue;
		}

		return file;
	}

	// no room in any of the temp directories
	Firebird::Arg::Gds status(isc_out_of_temp_space);
	status.append(Firebird::Arg::StatusVector(status_vector));
	iscLogStatus(NULL, status.value());
	status.raise();

	return NULL;	// compiler silencer
}

//
// TempSpace::allocateSpace
//
// Allocate available space in free segments. Extend file if necessary
//

offset_t TempSpace::allocateSpace(size_t size)
{
	// Find the best available space. This is defined as the smallest free space
	// that is big enough. This preserves large blocks.
	Segment* best = NULL;

	// Search the free segments list for available space
	for (bool found = freeSegments.getFirst(); found; found = freeSegments.getNext())
	{
		Segment* const space = &freeSegments.current();
		// If this fits and is smaller than our previous best, use it
		if (space->size >= size && (!best || (space->size < best->size))) {
			best = space;
		}
	}

	// If we didn't find any space, allocate it at the end of the file
	if (!best)
	{
		extend(size);
		return getSize() - size;
	}

	// Set up the return parameters
	const offset_t position = best->position;
	best->size -= size;
	best->position += size;

	// If the hunk was an exact fit, remove the segment from the list
	if (!best->size)
	{
		if (!freeSegments.locate(best->position))
			fb_assert(false);

		freeSegments.fastRemove();
	}

	return position;
}
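
// Best-fit illustration for allocateSpace() (hypothetical free list): with free
// segments of 3 KB, 8 KB and 64 KB, a request for 4 KB picks the 8 KB segment
// (the smallest one that fits) and shrinks it to 4 KB, preserving the 64 KB
// segment for larger requests. A request for 100 KB finds no fit and extends
// the space instead:
//
//   const offset_t pos = space.allocateSpace(100 * 1024);   // pos == previous getSize()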

//
// TempSpace::releaseSpace
//
// Return a previously allocated segment to the free segments list and
// merge it with adjacent segments, if any
//

void TempSpace::releaseSpace(offset_t position, size_t size)
{
	fb_assert(size > 0);
	fb_assert(position < getSize());	// Block starts in file
	const offset_t end = position + size;
	fb_assert(end <= getSize());	// Block ends in file

	if (freeSegments.locate(Firebird::locEqual, end))
	{
		// The next segment is found to be adjacent
		Segment* const next_seg = &freeSegments.current();
		next_seg->position -= size;
		next_seg->size += size;

		if (freeSegments.getPrev())
		{
			// Check the prior segment for being adjacent
			Segment* const prior_seg = &freeSegments.current();
			if (position == prior_seg->position + prior_seg->size)
			{
				next_seg->position -= prior_seg->size;
				next_seg->size += prior_seg->size;
				freeSegments.fastRemove();
			}
		}

		return;
	}

	if (freeSegments.locate(Firebird::locLess, position))
	{
		// Check the prior segment for being adjacent
		Segment* const prior_seg = &freeSegments.current();
		if (position == prior_seg->position + prior_seg->size)
		{
			prior_seg->size += size;
			return;
		}
	}

	freeSegments.add(Segment(position, size));
}
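
// Coalescing illustration for releaseSpace() (hypothetical offsets): if the free
// list already holds segments [0, 4096) and [8192, 12288), then releasing the
// block at position 4096 with size 4096 finds the segment starting at 8192
// adjacent on the right and the one ending at 4096 adjacent on the left, so all
// three merge into a single free segment [0, 12288).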

//
// TempSpace::inMemory
//
// Return contiguous chunk of memory if present at given location
//

UCHAR* TempSpace::inMemory(offset_t begin, size_t size) const
{
	const Block* block = findBlock(begin);
	return block ? block->inMemory(begin, size) : NULL;
}

//
// TempSpace::findMemory
//
// Return contiguous chunk of memory and adjust starting offset
// of search range if found
//

UCHAR* TempSpace::findMemory(offset_t& begin, offset_t end, size_t size) const
{
	offset_t local_offset = begin;
	const offset_t save_begin = begin;
	const Block* block = findBlock(local_offset);

	while (block && (begin + size <= end))
	{
		UCHAR* const mem = block->inMemory(local_offset, size);
		if (mem)
		{
			return mem;
		}

		begin += block->size - local_offset;
		local_offset = 0;
		block = block->next;
	}

	begin = save_begin;
	return NULL;
}

//
// TempSpace::validate
//
// Validate the internal lists for consistency and return the amount
// of available free space to the caller
//

bool TempSpace::validate(offset_t& free) const
{
	free = 0;
	FreeSegmentTree::ConstAccessor accessor(&freeSegments);
	for (bool found = accessor.getFirst(); found; found = accessor.getNext())
	{
		const offset_t size = accessor.current().size;
		fb_assert(size != 0);
		free += size;
	}

	offset_t disk = 0;
	for (size_t i = 0; i < tempFiles.getCount(); i++)
		disk += tempFiles[i]->getSize();

	return ((initialBuffer.getCount() + localCacheUsage + disk) == physicalSize);
}


//
// TempSpace::allocateBatch
//
// Allocate up to 'count' contiguous chunks of memory available in free
// segments, if any. Adjust the chunk size between minSize and maxSize
// according to the available free space (assuming all of the free space
// is in memory blocks). The algorithm is very simple and can be improved
// in the future
//

size_t TempSpace::allocateBatch(size_t count, size_t minSize, size_t maxSize, Segments& segments)
{
	// Adjust the requested chunk size to the amount of free memory
	// we have and the number of runs still not allocated.
	offset_t freeMem = 0;

	for (bool found = freeSegments.getFirst(); found; found = freeSegments.getNext())
		freeMem += freeSegments.current().size;

	freeMem = MIN(freeMem / count, maxSize);
	freeMem = MAX(freeMem, minSize);
	freeMem = MIN(freeMem, minBlockSize);
	freeMem &= ~(FB_ALIGNMENT - 1);

	bool is_positioned = freeSegments.getFirst();
	while (segments.getCount() < count && is_positioned)
	{
		Segment* freeSpace = &freeSegments.current();
		offset_t freeSeek = freeSpace->position;
		const offset_t freeEnd = freeSpace->position + freeSpace->size;

		UCHAR* const mem = findMemory(freeSeek, freeEnd, freeMem);

		if (mem)
		{
			fb_assert(freeSeek + freeMem <= freeEnd);
#ifdef DEV_BUILD
			offset_t seek1 = freeSeek;
			UCHAR* const p = findMemory(seek1, freeEnd, freeMem);
			fb_assert(p == mem);
			fb_assert(seek1 == freeSeek);
#endif
			if (freeSeek != freeSpace->position)
			{
				const offset_t skip_size = freeSeek - freeSpace->position;
				const Segment skip_space(freeSpace->position, skip_size);

				freeSpace->position += skip_size;
				freeSpace->size -= skip_size;
				fb_assert(freeSpace->size != 0);

				if (!freeSegments.add(skip_space))
					fb_assert(false);

				if (!freeSegments.locate(skip_space.position + skip_size))
					fb_assert(false);

				freeSpace = &freeSegments.current();
			}

			SegmentInMemory seg;
			seg.memory = mem;
			seg.position = freeSeek;
			seg.size = freeMem;
			segments.add(seg);

			freeSpace->position += freeMem;
			freeSpace->size -= freeMem;

			if (!freeSpace->size)
			{
				is_positioned = freeSegments.fastRemove();
			}
		}
		else
		{
			is_positioned = freeSegments.getNext();
		}
	}

	return segments.getCount();
}
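
// Worked example for the chunk-size arithmetic above (hypothetical numbers,
// assuming minBlockSize == 1 MB and FB_ALIGNMENT == 8): with 10 MB of free
// space and count == 16, freeMem starts at 10,485,760 / 16 == 655,360; it is
// left unchanged by the maxSize/minSize clamps when minSize <= 655,360 <= maxSize,
// stays below minBlockSize, and the final mask is a no-op since the value is
// already 8-byte aligned. Each in-memory free segment then yields chunks of
// that size until 'count' chunks are collected or the free list is exhausted.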