/* Handling of points and other moveable features. */
7 /*========== declarations ==========*/
14 typedef struct KindInfo KindInfo;
16 /* Kind-independent code is responsible for determining
17 * the method, doing a bit of cleanup, and adjusting the flow
18 * slightly. Per-kind code does the actual work and is mostly in
19 * charge - it is also responsible for updating seg->moving.
21 /* The following states exist for each MovPosChange
22 * at points when control flow passes between kind and indep:
23 * U Unallocated no memory allocated (MovPosChange does not exist)
24 * A Allocated memory allocation done
25 * R Reserved reservation was successful
26 * C Confirmed motion queued and will occur
27 * D Done motion is complete and callback just needs to be made
28 * E Erroneous indep must call destroy straight away
29 * seg->moving is in one of the states UCD
32 typedef struct MovPosChange { /* valid in: filled in by and when: */
33 const KindInfo *ki; /* ARCDE indep after allocate() */
34 Segment *move; /* ARCDE indep after allocate() */
35 MovPosCallback *on_done; /* CD indep before confirm() */
36 void *u; /* CD indep before confirm() */
37 MovPosComb actual; /* CD see below */
38 /* kind-specific data follows */ /* varies kind-specific code, varies */
40 /* `actual' contains the kind's record of the physical state. It is
41 * initialised by indep (just before confirm) from move->moving->actual
42 * or move->movposcomb as the case may be. It should be updated
43 * by the kind, since it is used by indep for calculating the number
44 * and identities of the features which may need to change when a
45 * new move request is intended to replace an existing one - ie, the
46 * contents of the motions[] provided to a subsequent confirm().
47 * So while a change is Confirmed, the physical state is recorded
48 * only in the relevant change, and not in the segment's movposcomb.
49 * Once a change goes to Confirmed, the indep code never untangles it
50 * so the kind can manage the proper transition.
  /* Per-kind method table.  The state letters (U/A/R/C/D/E) refer to
   * the MovPosChange states described in the comment above. */
  Change *(*allocate)(int alloc_motions); /* U->A (always succeeds) */
  ErrorCode (*reserve)(Change*, Segment*, int ms); /* A->R; error: A->E */
  ErrorCode (*confirm)(Change*, Segment*, int n_motions,
                       const Motion*, int ms); /* [AR]->C; error; [AR]->E */
  void (*destroy)(Change*); /* [ARCE]->U */
  /* indep guarantees that
   *   alloc_motions >= move->i->n_motions on reserve
   *   alloc_motions >= n_motions on confirm
   * and that if on entry to reserve move->moving is non-0,
   *   it is of the same kind
67 /*========== points ==========*/
 * We maintain two queues, one for reserved and one for actually confirmed
71 * requests where we know what we're doing.
73 * We divide time into discrete slots, numbered with clock arithmetic.
75 * cslot cslot+1 cslot+2
77 * currently next in after
80 * We increment cslot when we issue a POINT command to the PIC.
81 * In a request, the deadline represents the latest allowable value
82 * of cslot just before that increment.
/* A discrete time slot number; unsigned so that comparisons and
 * differences use wrap-around (clock) arithmetic. */
typedef unsigned PtSlot;
/* Signed counterpart, used to turn a PtSlot difference into an
 * ordered comparison. */
typedef int PtSlotSigned;
88 /* We think there are three states: Allocated, Reserved and Confirmed.
89 * (plus of course Unallocated where we don't have a request at all).
90 * These correspond to the indep code as follows:
92 * indep state pt state queues checked and plan viable
94 * Allocated Allocated yes
95 * Reserved Reserved yes
96 * Confirmed Confirmed yes
100 * Erroneous exists only after a failed reserve() or confirm() so it's
101 * not that confusing to have this slightly malleable terminology.
typedef struct {           /*  Allocated  Reserved  Confirmed          */
  /* in queue?                 absent     reserved  confirmed          */
  /* NOTE(review): the field type reads `Slot' but the typedef above is
   * `PtSlot' - confirm which is intended. */
  Slot deadline;           /*  ~0         relative  absolute       <-  */
  int n_motions;           /*  alloc'd    alloc'd   undone             */
  Motion motions[];        /*  [0].i: 0   0         non-0          <-  */
                           /* [..].i: undef  undef  non-0              */
                           /*  .posn: undef  undef  defined            */
/* We can determine the state by looking at the two
114 * `statedet' fields, marked <- above.
115 * There are also intermediate states where the req's
116 * statedet fields do not agree with the queue it's on.
117 * We write these as, for example,
118 * AR to mean statedet says Allocated, but queued on pt_reserved
119 * A? to mean statedet says Allocated, but may be queued
120 * etc. They are only allowed while we are in a pt_... method function.
123 #define CDU_RECHARGE 250 /*ms*/
124 #define POINT_MOVEMENT 50 /*ms*/
128 PointReq *l[PT_MAX_QUEUE];
132 actually fire points: use item, and remove it, etc.
135 #define PT_MAX_QUEUE 15
138 * CDU and point queue states:
141 * ____________ pt_cdu_ conf'd
142 * / points_ \ charged .n
146 * |from INACTIVE -1 0
147 * |any <=Sta_Settling
150 * ___________ |turning
/* Current slot counter; incremented when a POINT command is issued
 * (see the slot-numbering comment above). */
static PtSlot pt_cslot;
/* CDU state: <0 means points machinery inactive (see on_pic_charged);
 * 0 means not charged (pt_check_plan refuses to fire). */
static int pt_cdu_charged;
/* The two request queues, kept sorted with earliest deadline first. */
static PointQueue pt_confirmed, pt_reserved;
171 static pt_maxdelay_reldeadline(int maxdelay_ms) {
172 return (maxdelay_ms - POINT_MOVEMENT + CDU_RECHARGE) / CDU_RECHARGE;
175 static void pt_queue_remove_index(PointQueue *q, int index) {
177 memmove(&q->l[index], &q->l[index+1], sizeof(q->l[0]) * (q->n - index));
180 static void pt_req_compar(const void *av, const void *bv) {
181 PointQueue *const *a= av;
182 PointQueue *const *b= av;
183 return (PtSlotSigned)((*b)->deadline - (*a)->deadline);
static void pt_queue_remove_item(PointQueue *q, PointReq *r) {
  /* Locate r in the deadline-sorted queue and remove it.
   * NOTE(review): bsearch's key should match the element type, i.e.
   * a PointReq* slot - passing `r' rather than `&r' looks wrong, and
   * bsearch finds *some* equal-deadline entry, not necessarily r
   * itself if deadlines can collide.  Confirm against the elided
   * lines (declaration of `entry' and any assert are not visible). */
  entry= bsearch(r, q->l, q->n, sizeof(q->l[0]), pt_req_compar);
  pt_queue_remove_index(q, entry - q->l);
static void pt_dequeue(PointReq *r) { /* X->XA */
  /* Remove r from whichever queue it is on, determined from the
   * statedet fields (see state table above): motions[0].i non-0 means
   * Confirmed; otherwise deadline != ~0 means Reserved; otherwise
   * Allocated and on no queue, so nothing to do. */
  if (r->motions[0].i) {
    pt_queue_remove_item(&pt_confirmed, r);
  } else if (~r->deadline) {
    pt_queue_remove_item(&pt_reserved, r);
201 static void pt_mark_as_allocated(PointReq *r) { /* AX->X */
202 /* Sets statedet fields for Allocated */
203 r->deadline= ~(PtSlot)0;
static ErrorCode pt_check_plan(void) {
  /* Checks whether we can meet the currently queued commitments */
  /* Simulates future slots, merging the confirmed queue (absolute
   * deadlines) with the reserved queue (relative deadlines).  Several
   * lines of this function are elided in this view, including the
   * initialisation of future/conf/resv and the main loop header. */
  int future, conf, resv;
  /* If CDU is charged we can do one thing right away */
  while (conf < pt_confirmed.n &&
         /* NOTE(review): l[] holds PointReq pointers, so this should
          * presumably be l[conf]->deadline (both `.' vs `->' and the
          * index look wrong) - confirm against the full source. */
         pt_confirmed.l[0].deadline==pt_cslot) {
    if (!pt_cdu_charged) return EC_Points;
    if (conf) return EC_Points;
    /* Peek the earliest outstanding entry of each queue. */
    PointReq *confr= conf < pt_confirmed.n ? pt_confirmed.l[conf] : 0;
    /* NOTE(review): index here should presumably be [resv], not
     * [conf] - looks like a copy-paste slip. */
    PointReq *resvr= resv < pt_reserved .n ? pt_reserved .l[conf] : 0;
    if (!confr && !resvr) break;
    /* Confirmed deadlines are absolute; reserved ones are relative. */
    int confwhen= confr ? confr->deadline - pt_cslot : INT_MAX;
    int resvwhen= resvr ? resvr->deadline : INT_MAX;
    if (resvwhen < confwhen) {
      if (usewhen > future) return EC_Points;
static ErrorCode pt_enqueue(PointQueue *q, PointReq *r) { /* XA -> X */
  ErrorCode ec; /* ... where X is R or C and corresponds to q */
                /* or on error, XA -> A */
  if (q->n == PT_MAX_QUEUE) {
    return EC_BufferFull;
  /* Insertion sort: shift later-deadline entries up one slot, keeping
   * the earliest deadline at l[0].  The surrounding for-loop header
   * and the final store of r are elided in this view. */
  insat>0 && CLOCK_COMPARE(r->deadline, <, q->l[insat-1]->deadline);
  q->l[insat]= q->l[insat-1];
  /* if this fails, indep machinery calls pt_destroy which dequeues */
260 /*---------- kind method entrypoints ----------*/
/* Kind method: U->A.  Allocates a PointReq with room for alloc_motions
 * motions and statedet fields set for Allocated.
 * NOTE(review): the return type reads `SomeChange' but the function
 * returns (SomeChange*) - the `*' has presumably been lost in
 * transcription.  The declaration of `r' is also elided here. */
static SomeChange pt_allocate(int alloc_motions) {
  assert(pt_cdu_charged>=0); /* points machinery must not be inactive */
  /* we need at least one motion in the table so we can tell
   * the difference between the states by looking at motions[0].i */
  r= mmalloc(sizeof(*r) + alloc_motions * sizeof(r->motions[0]));
  r->deadline= ~(PtSlot)0; /* Allocated sentinel (see state table) */
  r->n_motions= alloc_motions;
  return (SomeChange*)r;
278 static ErrorCode point_reserve(SomeChange *chg, Segment *move,
280 PointReq *r= (PointReq*)chg;
281 r->deadline= pt_reldeadline(maxdelay_ms);
282 if (!r->deadline) { pt_mark_as_allocated(r); return EC_Points; }
283 return pt_queue_add(&pt_reserved, r);
/* Kind method: [AR]->C.  Installs the actual motion list and moves the
 * request onto the confirmed queue, replacing any existing confirmed
 * request for the same segment.  Several declarations (newdeadline,
 * allow_failure, ec) and some lines are elided in this view. */
static ErrorCode point_confirm(SomeChange *chg, Segment *move,
                               int n_motions, const Motion *motions,
  PointReq *r= (PointReq*)chg;
  /* If the segment is moving, these motions are already based on the
   * actual physical position which is stored in the existing request.
   * So we try removing the existing request from the queue and put
   * it back if it doesn't work.
   */
  assert(n_motions <= r->n_motions);
  /* NOTE(review): `pt_reldeadline' and `cslot' do not match the names
   * defined in this file (pt_maxdelay_reldeadline, pt_cslot); and the
   * deadline gets pt_cslot added again below - one of the two
   * additions is presumably spurious.  Confirm. */
  newdeadline= pt_reldeadline(maxdelay_ms) + cslot;
  /* Replacing with a tighter deadline is allowed to fail. */
  allow_failure= newdeadline < r->deadline;
                                           /* states of existing: */
  PointReq *existing= move->moving;        /* U or C */
  if (existing) pt_dequeue(existing);      /* U or CA */
  memcpy(r->motions, motions, sizeof(r->motions[0])*n_motions);
  /* NOTE(review): this writes through the const `motions' parameter;
   * it should presumably set r->motions[0].i (the statedet field)
   * instead - confirm against the full source. */
  if (!n_motions) motions[0].i= move->i->movfeats;
  assert(motions[0].i);
  r->n_motions= n_motions;
  r->deadline= newdeadline + pt_cslot;
  ec= pt_enqueue(&pt_confirmed, r);
  assert(allow_failure || !ec);
  if (existing) {                          /* CA */
    if (ec) {                              /* state C but bad */
      pt_dequeue(r);                       /* state CA */
      pt_mark_as_allocated(r);             /* state A */
      /* Put the old request back; it fitted before, so it must fit. */
      ErrorCode ec_putback= pt_enqueue(&pt_confirmed, existing);
      assert(!ec_putback);                 /* C */
    } else {                               /* state C and good */
      free(existing);                      /* U */
  /* either ec=0   state C    U
   *     or ec!=0  state C but bad   C
   */
  if (!ec) pt_check_action();
/* Kind method: [ARCE]->U.  Dequeues the request from whichever queue
 * it is on (via the statedet fields) and then frees it; the dequeue
 * and free lines are elided in this view. */
static void pt_destroy(Change *chg) { /* X->XA and then free it */
  PointReq *r= (PointReq*)chg;
346 /*---------- actually firing points, yay! ----------*/
/* Fire the next confirmed motion if the CDU is charged, or report a
 * finished request back to the indep machinery.  Some lines (e.g. the
 * PicInsn `piob' declaration and the else/closing braces) are elided
 * in this view. */
static void pt_check_action(void) {
  if (!pt_confirmed.n) return;
  PointReq *r= pt_confirmed.l[0];
  if (r->n_motions && pt_cdu_charged) {
    /* look for something to fire */
    /* Take motions from the tail; n_motions counts those still undone. */
    Motion *m= &r->motions[--r->n_motions];
    assert(m->posn < m->i->posns);
    enco_pic_point(&piob, m->i->boob[m->posn]);
    serial_transmit(&piob);
    /* Splice this feature's new position into the mixed-radix
     * combination: digits above and below m's weight are preserved. */
    MovPosComb above_weight= m->i->weight * m->i->posns;
    MovPosComb above= r->actual / above_weight;
    MovPosComb below= r->actual % m->i->weight;
    /* NOTE(review): `r->actual' here vs `r->h.actual' elsewhere in
     * this file - confirm which reflects the real struct layout. */
    r->actual= above*above_weight + m->posn*m->i->weight + below;
  /* look for something to report
   * we can get things here other than from the above
   * eg if we are asked to move the
   */
  Segment *move= r->h.move;
  assert(move->moving == r);
  pt_queue_remove_index(&pt_confirmed,0);
  pt_mark_as_allocated(r); /* now state A aka Done */
  movpos_done((Change*)r);
383 /*---------- entrypoints from rest of program ----------*/
/* Abandon all outstanding point requests, e.g. on shutdown/fault.
 * Records each confirmed request's physical position back into its
 * segment (while Confirmed, the truth lives in the request, not in
 * movposcomb - see comment near the top).  The declaration of `i' and
 * the freeing of each request are elided in this view. */
static void points_all_abandon(void) {
  assert(!pt_reserved.n); /* reservations must already be gone */
  for (i=0; i<pt_confirmed.n; i++) {
    PointReq *r= pt_confirmed.l[i];
    Segment *move= r->h.move;
    assert(move->moving == (PointReq*)r);
    move->movposcomb= r->h.actual;
  toev_stop(&pt_motion_timeout);
/* Called when point power/motion starts: arm the one-shot motion
 * timeout which fires pt_motion_done after POINT_MOVEMENT ms. */
static void points_turning_on(void) {
  pt_motion_timeout.duration= POINT_MOVEMENT;
  pt_motion_timeout.callback= pt_motion_done;
  assert(!pt_motion_timeout.running);
/* PIC reply handler: the CDU reports it has recharged.  Ignored while
 * the points machinery is inactive (pt_cdu_charged<0).  Cancels any
 * pending motion timeout before proceeding (continuation elided). */
void on_pic_charged(const PicInsnInfo *pii, const PicInsn *pi, int objnum) {
  if (pt_cdu_charged<0) return;
  if (pt_motion_timeout.running) {
    toev_stop(&pt_motion_timeout);
420 /*========== method-independent machinery ==========*/
422 static const KindInfo methodinfos[]= {
423 { nomove_allocate, nomove_reserve, nomove_request, nomove_cancel },
424 { point_allocate, point_reserve, point_request, point_cancel },
/* Indep: a change has completed (state D).  Copies the final physical
 * position into the segment and invokes the caller's callback.
 * NOTE(review): `r' is not declared in the visible span (presumably an
 * elided cast of chg), and `r->actual' vs `r->h.on_done' mixes two
 * apparent layouts - confirm against the full source. */
static void movpos_done(Change *chg) {
  Segment *move= chg->move;
  move->movposcomb= r->actual;
  r->h.on_done(move, r->h.u);
/* Indep: U->A.  Allocates a change via the kind's allocate method and
 * fills in the kind-independent header fields (remainder elided).
 * The parameter list continues on an elided line (alloc_motions). */
ChangeHeader *mp_allocate(const KindInfo *ki, Segment *move,
  assert(sta_state >= Sta_Resolving);
  chg= (ChangeHeader*)ki->allocate(alloc_motions);
/* Indep entrypoint: request that segment `move' end up connecting
 * `back' to `fwd' (either may be 0 = don't care), within maxdelay_ms.
 * Chooses the cheapest target position combination, builds the motion
 * list, allocates a change if the caller did not pass one, and hands
 * it to the kind's confirm method.  Return type and many interior
 * lines are elided in this view.
 * NOTE(review): the signature is garbled - `*Segment *fwd' has a stray
 * `*', `MovPosChange chg' is presumably `MovPosChange *chg', and
 * `*u' lacks its `void' - confirm against the full source. */
movpos_change(Segment *back, Segment *move, *Segment *fwd,
              int maxdelay_ms, MovPosChange chg,
              MovPosCallback *on_done, *u) {
  const SegmentInfo *movei= move->i;
  MovPosComb actual, tcomb, bestcomb=0;
  int tchanges, bestchanges=INT_MAX;
  MovFeatKind kind= mfk_none;
  /* While a change is Confirmed the truth about the physical position
   * is in the change, not in movposcomb (see comment near the top). */
  kind= move->moving->ki - methodinfos;
  actual= move->moving->actual;
  actual= move->movposcomb;
  /* Scan every position combination, keeping the one needing the
   * fewest feature changes from `actual'. */
  for (tcomb=0, pci=movei->poscombs;
       tcomb<movei->n_poscombs;
    Segment *tback= &segments[pci->backwards.next];
    Segment *tfwd= &segments[pci->forwards .next];
    /* NOTE(review): `back=tfwd' and `fwd =tfwd' are assignments where
     * equality tests (==) are clearly intended - this clobbers the
     * caller's arguments and makes the filters wrong.  Confirm. */
    if (back && !(back==tback || back=tfwd)) continue;
    if (fwd && !(fwd ==tback || fwd =tfwd)) continue;
    if (movei->n_movfeats>1) {
      /* we have to search for the one which is least effort, then */
      /* A feature differs iff its mixed-radix digit differs. */
      for (feat=0, feati=movei->movfeats, tchanges=0;
           feat<movei->n_movfeats;
        if ((tcomb - actual) / feati->weight % feati->posns)
        if (tchanges > bestchanges)
    tchanges= bestchanges;
  if (bestchanges==INT_MAX) { ec= EC_Invalid; goto x; }
  /* Build the motion list for the chosen combination. */
  Motion motions[movei->n_movfeats];
  for (feat=0, feati=movei->movfeats;
       feat<movei->n_movfeats;
    if ((bestcomb - actual) / feati->weight % feati->posns)
    /* All features of one segment must share a kind. */
    if (feati->kind != kind) { ec= EC_MovFeatKindsCombination; goto x; }
    motions[n_motions].i= feati;
    motions[n_motions].posn= bestcomb / feati->weight % feati->posns;
  /* NOTE(review): methodinfos[kind] is a struct; this initialisation
   * of a pointer presumably needs `&methodinfos[kind]'.  Confirm. */
  const KindInfo *ki= methodinfos[kind];
  assert(move == chg->move);
  chg= mp_allocate(ki,move,n_motions);
  chg->on_done= on_done;
  /* NOTE(review): `ki->request' does not match the KindInfo interface
   * above (confirm) - presumably ki->confirm.  Confirm. */
  ec= ki->request(chg, move, n_motions, motions, maxdelay_ms);
  movpos_unreserve(chg);
/* Indep entrypoint: reserve capacity for a future change on `move'
 * within maxdelay_ms, returning the reservation via *res_r.  Return
 * type and several interior lines are elided in this view. */
movpos_reservechange(Segment *move, int maxdelay_ms, MovPosChange *res_r) {
  MovFeatKind kind= mfk_none;
  /* NOTE(review): the `;' after feat=0 should presumably be a `,' -
   * as written `feati=movei->movfeats' becomes the loop condition.
   * `movei' and the kind-tracking lines are elided.  Confirm. */
  for (feat=0; feati=movei->movfeats;
       feat<movei->n_movfeats;
    /* All features of one segment must share a kind. */
    if (feati->kind != kind) return EC_MovFeatKindsCombination;
  /* NOTE(review): presumably `&methodinfos[kind]' - see above. */
  const KindInfo *ki= methodinfos[kind];
  ChangeHeader *chg= mp_allocate(ki, move, movei->n_movfeats);
  ec= ki->reserve(chg, move, maxdelay_ms);
  movpos_unreserve(chg);
551 void movpos_unreserve(MovPosChange *res) {
553 req->ki->destroy(req);