/*
 * Copyright (C) 1994, Index Data I/S
 * Sebastian Hammer, Adam Dickmeiss
 *
 * Revision 1.10  1996-03-19 19:22:44  quinn
 *
 * Revision 1.9  1996/02/06 10:19:57  quinn
 * Attempt at fixing bug. Not all blocks were read before they were unlinked
 * prior to a remap operation.
 *
 * Revision 1.8  1996/01/29 09:47:11  quinn
 * Fixed mean little bug in the read-table code.
 *
 * Revision 1.7  1995/12/06 14:48:27  quinn
 * Fixed some strange bugs.
 *
 * Revision 1.6  1995/09/04 12:33:47  adam
 * Various cleanup. YAZ util used instead.
 *
 * Revision 1.5  1994/09/28 11:29:33  quinn
 * Added cmp parameter.
 *
 * Revision 1.4  1994/09/27 20:03:53  quinn
 * Seems relatively bug-free.
 *
 * Revision 1.3  1994/09/26 17:11:31  quinn
 *
 * Revision 1.2  1994/09/26 17:06:36  quinn
 *
 * Revision 1.1  1994/09/26 16:07:57  quinn
 * Most of the functionality in place.
 *
 * This module handles the representation of tables in the bfiles.
 */
49 static int is_freestore_alloc(ISAM is, int type)
53 if (is->types[type].freelist >= 0)
55 tmp = is->types[type].freelist;
56 if (bf_read(is->types[type].bf, tmp, 0, sizeof(tmp),
57 &is->types[type].freelist) <=0)
59 logf (LOG_FATAL, "Failed to allocate block");
64 tmp = is->types[type].top++;
66 logf (LOG_DEBUG, "Allocating block #%d", tmp);
70 static void is_freestore_free(ISAM is, int type, int block)
74 logf (LOG_DEBUG, "Releasing block #%d", block);
75 tmp = is->types[type].freelist;
76 is->types[type].freelist = block;
77 if (bf_write(is->types[type].bf, block, 0, sizeof(tmp), &tmp) < 0)
79 logf (LOG_FATAL, "Failed to deallocate block.");
84 /* this code must be modified to handle an index */
85 int is_p_read_partial(is_mtable *tab, is_mblock *block)
90 assert(block->state == IS_MBSTATE_UNREAD);
91 block->data = buf = xmalloc_mbuf(IS_MBUF_TYPE_LARGE);
92 toread = tab->is->types[tab->pos_type].blocksize;
93 if (toread > is_mbuf_size[buf->type])
95 toread = is_mbuf_size[buf->type];
96 block->state = IS_MBSTATE_PARTIAL;
99 block->state = IS_MBSTATE_CLEAN;
100 if (bf_read(tab->is->types[tab->pos_type].bf, block->diskpos, 0, toread,
103 logf (LOG_FATAL, "bfread failed.");
106 /* extract header info */
108 memcpy(&block->num_records, buf->data, sizeof(block->num_records));
109 buf->offset += sizeof(block->num_records);
110 memcpy(&block->nextpos, buf->data + buf->offset,
111 sizeof(block->nextpos));
112 buf->offset += sizeof(block->nextpos);
113 if (block == tab->data) /* first block */
115 memcpy(&tab->num_records, buf->data + buf->offset,
116 sizeof(tab->num_records));
117 buf->offset +=sizeof(tab->num_records);
119 buf->num = (toread - buf->offset) / is_keysize(tab->is);
120 if (buf->num >= block->num_records)
122 buf->num = block->num_records;
123 block->state = IS_MBSTATE_CLEAN;
126 block->bread = buf->offset + buf->num * is_keysize(tab->is);
130 int is_p_read_full(is_mtable *tab, is_mblock *block)
135 if (block->state == IS_MBSTATE_UNREAD && is_p_read_partial(tab, block) < 0)
137 logf (LOG_FATAL, "partial read failed.");
140 if (block->state == IS_MBSTATE_PARTIAL)
143 dread = block->data->num;
144 while (dread < block->num_records)
146 buf->next = xmalloc_mbuf(IS_MBUF_TYPE_LARGE);
149 toread = is_mbuf_size[buf->type] / is_keysize(tab->is);
150 if (toread > block->num_records - dread)
151 toread = block->num_records - dread;
153 if (bf_read(tab->is->types[tab->pos_type].bf, block->diskpos, block->bread, toread *
154 is_keysize(tab->is), buf->data) < 0)
156 logf (LOG_FATAL, "bfread failed.");
162 block->bread += toread * is_keysize(tab->is);
164 block->state = IS_MBSTATE_CLEAN;
166 logf (LOG_DEBUG, "R: Block #%d contains %d records.", block->diskpos, block->num_records);
171 * write dirty blocks to bfile.
172 * Allocate blocks as necessary.
174 void is_p_sync(is_mtable *tab)
179 isam_blocktype *type;
181 type = &tab->is->types[tab->pos_type];
182 for (p = tab->data; p; p = p->next)
184 if (p->state < IS_MBSTATE_DIRTY)
186 /* make sure that blocks are allocated. */
188 p->diskpos = is_freestore_alloc(tab->is, tab->pos_type);
191 if (p->next->diskpos < 0)
192 p->nextpos = p->next->diskpos = is_freestore_alloc(tab->is,
195 p->nextpos = p->next->diskpos;
200 memcpy(type->dbuf, &p->num_records, sizeof(p->num_records));
201 sum += sizeof(p->num_records);
202 memcpy(type->dbuf + sum, &p->nextpos, sizeof(p->nextpos));
203 sum += sizeof(p->nextpos);
204 if (p == tab->data) /* first block */
206 memcpy(type->dbuf + sum, &tab->num_records,
207 sizeof(tab->num_records));
208 sum += sizeof(tab->num_records);
210 for (b = p->data; b; b = b->next)
212 memcpy(type->dbuf + sum, b->data + b->offset, v = b->num *
213 is_keysize(tab->is));
216 assert(sum <= type->blocksize);
218 if (bf_write(type->bf, p->diskpos, 0, sum, type->dbuf) < 0)
220 logf (LOG_FATAL, "Failed to write block.");
223 logf (LOG_DEBUG, "W: Block #%d contains %d records.", p->diskpos, p->num_records);
228 * Free all disk blocks associated with table.
230 void is_p_unmap(is_mtable *tab)
234 for (p = tab->data; p; p = p->next)
238 is_freestore_free(tab->is, tab->pos_type, p->diskpos);
244 static is_mbuf *mbuf_takehead(is_mbuf **mb, int *num, int keysize)
246 is_mbuf *p = 0, **pp = &p, *new;
251 while (*mb && toget >= (*mb)->num)
259 if (toget > 0 && *mb)
261 new = xmalloc_mbuf(IS_MBUF_TYPE_SMALL);
262 new->next = (*mb)->next;
264 new->data = (*mb)->data;
266 new->offset = (*mb)->offset + toget * keysize;
267 new->num = (*mb)->num - toget;
279 * Split up individual blocks which have grown too large.
280 * is_p_align and is_p_remap are alternative functions which trade off
281 * speed in updating versus optimum usage of disk blocks.
283 void is_p_align(is_mtable *tab)
285 is_mblock *mblock, *new, *last = 0, *next;
286 is_mbuf *mbufs, *mbp;
287 int blocks, recsblock;
289 logf (LOG_DEBUG, "Realigning table.");
290 for (mblock = tab->data; mblock; mblock = next)
293 if (mblock->state == IS_MBSTATE_DIRTY && mblock->num_records == 0)
297 last->next = mblock->next;
298 last->state = IS_MBSTATE_DIRTY;
303 tab->data = tab->data->next;
307 if (next->state < IS_MBSTATE_CLEAN)
309 if (is_p_read_full(tab, next) < 0)
311 logf(LOG_FATAL, "Error during re-alignment");
314 if (next->nextpos && !next->next)
316 next->next = xmalloc_mblock();
317 next->next->diskpos = next->nextpos;
318 next->next->state = IS_MBSTATE_UNREAD;
319 next->next->data = 0;
322 next->state = IS_MBSTATE_DIRTY; /* force re-process */
325 if (mblock->diskpos >= 0)
326 is_freestore_free(tab->is, tab->pos_type, mblock->diskpos);
327 xrelease_mblock(mblock);
329 else if (mblock->state == IS_MBSTATE_DIRTY && mblock->num_records >
330 (mblock == tab->data ?
331 tab->is->types[tab->pos_type].max_keys_block0 :
332 tab->is->types[tab->pos_type].max_keys_block))
334 blocks = tab->num_records /
335 tab->is->types[tab->pos_type].nice_keys_block;
336 if (tab->num_records %
337 tab->is->types[tab->pos_type].nice_keys_block)
339 recsblock = tab->num_records / blocks;
342 mbufs = mblock->data;
343 while ((mbp = mbuf_takehead(&mbufs, &recsblock,
344 is_keysize(tab->is))) && recsblock)
348 new = xmalloc_mblock();
350 new->state = IS_MBSTATE_DIRTY;
351 new->next = mblock->next;
355 mblock->num_records = recsblock;
357 mblock = mblock->next;
367 * Reorganize data in blocks for minimum block usage and quick access.
368 * Free surplus blocks.
369 * is_p_align and is_p_remap are alternative functions which trade off
370 * speed in updating versus optimum usage of disk blocks.
372 void is_p_remap(is_mtable *tab)
374 is_mbuf *mbufs, **bufpp, *mbp;
375 is_mblock *blockp, **blockpp;
376 int recsblock, blocks;
378 logf (LOG_DEBUG, "Remapping table.");
379 /* collect all data */
381 for (blockp = tab->data; blockp; blockp = blockp->next)
383 if (blockp->state < IS_MBSTATE_CLEAN && is_m_read_full(tab, blockp) < 0)
385 logf (LOG_FATAL, "Read-full failed in remap.");
388 *bufpp = blockp->data;
390 bufpp = &(*bufpp)->next;
393 blocks = tab->num_records / tab->is->types[tab->pos_type].nice_keys_block;
394 if (tab->num_records % tab->is->types[tab->pos_type].nice_keys_block)
398 recsblock = tab->num_records / blocks + 1;
399 if (recsblock > tab->is->types[tab->pos_type].nice_keys_block)
401 blockpp = &tab->data;
402 while ((mbp = mbuf_takehead(&mbufs, &recsblock, is_keysize(tab->is))) &&
407 *blockpp = xmalloc_mblock();
408 (*blockpp)->diskpos = -1;
410 (*blockpp)->data = mbp;
411 (*blockpp)->num_records = recsblock;
412 (*blockpp)->state = IS_MBSTATE_DIRTY;
413 blockpp = &(*blockpp)->next;
419 for (blockp = *blockpp; blockp; blockp = blockp->next)
420 if (blockp->diskpos >= 0)
421 is_freestore_free(tab->is, tab->pos_type, blockp->diskpos);
422 xfree_mblocks(*blockpp);