#! /bin/bash

# Get the files and create a file of tokens.

# fnid (GNU id-utils) lists the files recorded in the ID database.
fnid > filelist
rm -f tokenlist          # -f: do not error if tokenlist does not exist yet
touch tokenlist
# For each file named in filelist, append its tokens (fid output) to
# tokenlist.  The original generated a helper script with awk and then
# sourced it, which left a stray temp2 file behind and would break on
# any filename containing shell metacharacters; a read loop calls fid
# directly.  "read -r file _" keeps only the first field, matching the
# original awk's use of $1.
while read -r file _; do
    fid "$file" >> tokenlist
done < filelist

# Get rid of some things that shouldn't be there, such as constants and
# C/C++ words, such as if/then/else/while.

# Drop tokens that start with a digit (numeric constants).
sed -e '/^[0-9]/d' tokenlist > temp

# Drop language keywords and other noise words.
# (Fixed: the original list contained an empty alternation "\|\|" after
# "align" — a typo that also deleted blank lines — and listed "or" twice.)
sed -e '/^\(if\|then\|else\|while\|void\|int\|char\|float\|align\|volatile\|and\|or\|ASMVOLATILE\|ASMCONST\|assembler\|argv\|argc\|assert\|bclr\|bcs\|beq\|bge\|ble\|blt\|break\|bst\|btst\|byte\|C\|cc\|class\|clr\|cls\|cmp\|const\|do\|docs\|documentation\|DOXYGEN\|dst\|each\|endif\|enum\|env\|equal\|false\|true\|for\|H8\|H8300\|in\|jsr\|lidx\|long\|mov\|new\|nop\|xor\|private\|public\|push\|rts\|rti\|stc\|union\|with\|_longjmp\|_setjmp\)$/d' < temp > tokenlist

# Drop reserved identifiers beginning with a double underscore.
sed -e '/^__/d' < tokenlist > temp

# temp has the list of tokens. Sort case-insensitively (-f), removing
# duplicates (-u); result goes back into tokenlist.

sort -fu < temp > tokenlist

# Whew, we now have a reasonably clean token list.  

# Create a list of all tokens
# NOTE(review): lid is the GNU id-utils query tool — with no arguments it
# presumably dumps every token together with the files that contain it;
# confirm against the id-utils manual.

lid > tokenlistbyfile

# Create a file of all uses of each token.

rm -f tokenindex         # -f: do not error if tokenindex does not exist yet
touch tokenindex
# For each token, write a "token---" header line followed by the gid
# output listing every use of that token.  The original built a helper
# script with an awk quote-dance ("'---'") and sourced it, which broke
# on any token containing shell metacharacters; the read loop calls gid
# directly with the token safely quoted.  "read -r token _" keeps only
# the first field, matching the original awk's use of $1.
while read -r token _; do
    printf '%s---\n' "$token" >> tokenindex
    gid "$token" >> tokenindex
done < tokenlist

# Remove the scratch file left over from the filtering stage.
rm -f temp

# clean it up a bit

# Collapse consecutive tokenindex lines that share the same first
# colon-delimited field onto a single output line: the first occurrence
# starts a new line as " field1 field2"; repeats append as ", field2".
# Fields past the second colon are dropped.  (The very first record
# emits a leading newline, and the output has no trailing newline —
# both preserved from the original.)
awk -F: '
{
    if ($1 == prev)
        printf ", %s", $2
    else
        printf "\n %s %s", $1, $2
    prev = $1
}
' tokenindex > tokenindex_cmprsd

