/*
 * 03/23/2005
 *
 * FortranTokenMaker.java - Scanner for the Fortran programming language.
 *
 * This library is distributed under a modified BSD license.  See the included
 * RSyntaxTextArea.License.txt file for details.
 */
package org.fife.ui.rsyntaxtextarea.modes;

import java.io.*;
import javax.swing.text.Segment;

import org.fife.ui.rsyntaxtextarea.*;


/**
 * Scanner for the Fortran programming language.
 *
 * This implementation was created using JFlex 1.4.1; however, the generated
 * file was modified for performance.  Memory allocation needs to be almost
 * completely removed to be competitive with the handwritten lexers (subclasses
 * of AbstractTokenMaker), so this class has been modified so that Strings are
 * never allocated (via yytext()), and the scanner never has to worry about
 * refilling its buffer (needlessly copying chars around).
 * We can achieve this because RText always scans exactly 1 line of tokens at a
 * time, and hands the scanner this line as an array of characters (a Segment
 * really).  Since tokens contain pointers to char arrays instead of Strings
 * holding their contents, there is no need for allocating new memory for
 * Strings.
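 *
 * For illustration, a caller might drive this scanner as sketched below
 * (hypothetical code and variable names, not part of this class):
 *
 * <pre>
 *   Segment line = new Segment();
 *   doc.getText(lineStart, lineLength, line); // doc: a javax.swing.text.Document
 *   FortranTokenMaker tokenMaker = new FortranTokenMaker();
 *   Token t = tokenMaker.getTokenList(line, Token.NULL, lineStart);
 *   // 't' heads a linked list of tokens pointing into line's char array;
 *   // no Strings are allocated along the way.
 * </pre>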
 *
 * The actual algorithm generated for scanning has, of course, not been
 * modified.
 *
 * If you wish to regenerate this file yourself, keep in mind the following:
 *
 * @author Robert Futrell
 * @version 0.4
 */
%%

%public
%class FortranTokenMaker
%extends AbstractJFlexTokenMaker
%implements TokenMaker
%unicode
%ignorecase
%type org.fife.ui.rsyntaxtextarea.Token


%{

    /**
     * Constructor.  We must have this here as there is no default,
     * no-parameter constructor generated by JFlex.
     */
    public FortranTokenMaker() {
        super();
    }


    /**
     * Adds the token specified to the current linked list of tokens.
     *
     * @param tokenType The token's type.
     */
    private void addToken(int tokenType) {
        addToken(zzStartRead, zzMarkedPos-1, tokenType);
    }


    /**
     * Adds the token specified to the current linked list of tokens.
     *
     * @param tokenType The token's type.
     */
    private void addToken(int start, int end, int tokenType) {
        int so = start + offsetShift;
        addToken(zzBuffer, start,end, tokenType, so);
    }


    /**
     * Adds the token specified to the current linked list of tokens.
     *
     * @param array The character array.
     * @param start The starting offset in the array.
     * @param end The ending offset in the array.
     * @param tokenType The token's type.
     * @param startOffset The offset in the document at which this token
     *        occurs.
     */
    public void addToken(char[] array, int start, int end, int tokenType, int startOffset) {
        super.addToken(array, start,end, tokenType, startOffset);
        zzStartRead = zzMarkedPos;
    }


    /**
     * Returns the text to place at the beginning and end of a
     * line to "comment" it in this programming language.
     *
     * @return The start and end strings to add to a line to "comment"
     *         it out.
     */
    public String[] getLineCommentStartAndEnd() {
        return new String[] { "!", null };
    }


    /**
     * Returns the first token in the linked list of tokens generated
     * from text.  This method must be implemented by
     * subclasses so they can correctly implement syntax highlighting.
     *
     * @param text The text from which to get tokens.
     * @param initialTokenType The token type we should start with.
     * @param startOffset The offset into the document at which
     *        text starts.
     * @return The first Token in a linked list representing
     *         the syntax highlighted text.
     */
    public Token getTokenList(Segment text, int initialTokenType, int startOffset) {

        resetTokenList();
        this.offsetShift = -text.offset + startOffset;

        // Start off in the proper state.
        int state = Token.NULL;
        switch (initialTokenType) {
            case Token.LITERAL_STRING_DOUBLE_QUOTE:
                state = STRING;
                start = text.offset;
                break;
            case Token.LITERAL_CHAR:
                state = CHAR;
                start = text.offset;
                break;
            default:
                state = Token.NULL;
        }

        s = text;
        try {
            yyreset(zzReader);
            yybegin(state);
            return yylex();
        } catch (IOException ioe) {
            ioe.printStackTrace();
            return new DefaultToken();
        }

    }


    /**
     * Refills the input buffer.
     *
     * @return true if EOF was reached, otherwise false.
     * @exception IOException if any I/O-Error occurs.
     */
    private boolean zzRefill() throws java.io.IOException {
        return zzCurrentPos>=s.offset+s.count;
    }


    /**
     * Resets the scanner to read from a new input stream.
     * Does not close the old reader.
     *
     * All internal variables are reset, the old input stream
     * cannot be reused (internal buffer is discarded and lost).
     * Lexical state is set to YY_INITIAL.
     *
     * @param reader the new input stream
     */
    public final void yyreset(java.io.Reader reader) throws java.io.IOException {
        // 's' has been updated.
        zzBuffer = s.array;
        /*
         * We replaced the line below with the two below it because zzRefill
         * no longer "refills" the buffer (since the way we do it, it's always
         * "full" the first time through, since it points to the segment's
         * array).  So, we assign zzEndRead here.
         */
        //zzStartRead = zzEndRead = s.offset;
        zzStartRead = s.offset;
        zzEndRead = zzStartRead + s.count - 1;
        zzCurrentPos = zzMarkedPos = zzPushbackPos = s.offset;
        zzLexicalState = YYINITIAL;
        zzReader = reader;
        zzAtBOL = true;
        zzAtEOF = false;
    }


%}

LineTerminator          = (\n)
WhiteSpace              = ([ \t\f])

Column1CommentBegin     = ([C\*])
Column1Comment2Begin    = (D)
AnywhereCommentBegin    = (\!)

Identifier              = ([A-Za-z0-9_$]+)

StringDelimiter         = (\")
CharDelimiter           = (\')

Operators1              = ("<"|">"|"<="|">="|"&"|"/="|"==")
Operators2              = (\.(lt|gt|eq|ne|le|ge|and|or)\.)
Operator                = ({Operators1}|{Operators2})

Boolean                 = (\.(true|false)\.)

%state STRING
%state CHAR

%%

/* Keywords */
    "INCLUDE"           { addToken(Token.RESERVED_WORD); }
    "PROGRAM"           { addToken(Token.RESERVED_WORD); }
    "MODULE"            { addToken(Token.RESERVED_WORD); }
    "SUBROUTINE"        { addToken(Token.RESERVED_WORD); }
    "FUNCTION"          { addToken(Token.RESERVED_WORD); }
    "CONTAINS"          { addToken(Token.RESERVED_WORD); }
    "USE"               { addToken(Token.RESERVED_WORD); }
    "CALL"              { addToken(Token.RESERVED_WORD); }
    "RETURN"            { addToken(Token.RESERVED_WORD); }
    "IMPLICIT"          { addToken(Token.RESERVED_WORD); }
    "EXPLICIT"          { addToken(Token.RESERVED_WORD); }
    "NONE"              { addToken(Token.RESERVED_WORD); }
    "DATA"              { addToken(Token.RESERVED_WORD); }
    "PARAMETER"         { addToken(Token.RESERVED_WORD); }
    "ALLOCATE"          { addToken(Token.RESERVED_WORD); }
    "ALLOCATABLE"       { addToken(Token.RESERVED_WORD); }
    "ALLOCATED"         { addToken(Token.RESERVED_WORD); }
    "DEALLOCATE"        { addToken(Token.RESERVED_WORD); }
    "INTEGER"           { addToken(Token.RESERVED_WORD); }
    "REAL"              { addToken(Token.RESERVED_WORD); }
    "DOUBLE"            { addToken(Token.RESERVED_WORD); }
    "PRECISION"         { addToken(Token.RESERVED_WORD); }
    "COMPLEX"           { addToken(Token.RESERVED_WORD); }
    "LOGICAL"           { addToken(Token.RESERVED_WORD); }
    "CHARACTER"         { addToken(Token.RESERVED_WORD); }
    "DIMENSION"         { addToken(Token.RESERVED_WORD); }
    "KIND"              { addToken(Token.RESERVED_WORD); }
    "CASE"              { addToken(Token.RESERVED_WORD); }
    "SELECT"            { addToken(Token.RESERVED_WORD); }
    "DEFAULT"           { addToken(Token.RESERVED_WORD); }
    "CONTINUE"          { addToken(Token.RESERVED_WORD); }
    "CYCLE"             { addToken(Token.RESERVED_WORD); }
    "DO"                { addToken(Token.RESERVED_WORD); }
    "WHILE"             { addToken(Token.RESERVED_WORD); }
    "ELSE"              { addToken(Token.RESERVED_WORD); }
    "IF"                { addToken(Token.RESERVED_WORD); }
    "ELSEIF"            { addToken(Token.RESERVED_WORD); }
    "THEN"              { addToken(Token.RESERVED_WORD); }
    "ELSEWHERE"         { addToken(Token.RESERVED_WORD); }
    "END"               { addToken(Token.RESERVED_WORD); }
    "ENDIF"             { addToken(Token.RESERVED_WORD); }
    "ENDDO"             { addToken(Token.RESERVED_WORD); }
    "FORALL"            { addToken(Token.RESERVED_WORD); }
    "WHERE"             { addToken(Token.RESERVED_WORD); }
    "EXIT"              { addToken(Token.RESERVED_WORD); }
    "GOTO"              { addToken(Token.RESERVED_WORD); }
    "PAUSE"             { addToken(Token.RESERVED_WORD); }
    "STOP"              { addToken(Token.RESERVED_WORD); }
    "BACKSPACE"         { addToken(Token.RESERVED_WORD); }
    "CLOSE"             { addToken(Token.RESERVED_WORD); }
    "ENDFILE"           { addToken(Token.RESERVED_WORD); }
    "INQUIRE"           { addToken(Token.RESERVED_WORD); }
    "OPEN"              { addToken(Token.RESERVED_WORD); }
    "PRINT"             { addToken(Token.RESERVED_WORD); }
    "READ"              { addToken(Token.RESERVED_WORD); }
    "REWIND"            { addToken(Token.RESERVED_WORD); }
    "WRITE"             { addToken(Token.RESERVED_WORD); }
    "FORMAT"            { addToken(Token.RESERVED_WORD); }
    "AIMAG"             { addToken(Token.RESERVED_WORD); }
    "AINT"              { addToken(Token.RESERVED_WORD); }
    "AMAX0"             { addToken(Token.RESERVED_WORD); }
    "AMIN0"             { addToken(Token.RESERVED_WORD); }
    "ANINT"             { addToken(Token.RESERVED_WORD); }
    "CEILING"           { addToken(Token.RESERVED_WORD); }
    "CMPLX"             { addToken(Token.RESERVED_WORD); }
    "CONJG"             { addToken(Token.RESERVED_WORD); }
    "DBLE"              { addToken(Token.RESERVED_WORD); }
    "DCMPLX"            { addToken(Token.RESERVED_WORD); }
    "DFLOAT"            { addToken(Token.RESERVED_WORD); }
    "DIM"               { addToken(Token.RESERVED_WORD); }
    "DPROD"             { addToken(Token.RESERVED_WORD); }
    "FLOAT"             { addToken(Token.RESERVED_WORD); }
    "FLOOR"             { addToken(Token.RESERVED_WORD); }
    "IFIX"              { addToken(Token.RESERVED_WORD); }
    "IMAG"              { addToken(Token.RESERVED_WORD); }
    "INT"               { addToken(Token.RESERVED_WORD); }
    "LOGICAL"           { addToken(Token.RESERVED_WORD); }
    "MODULO"            { addToken(Token.RESERVED_WORD); }
    "NINT"              { addToken(Token.RESERVED_WORD); }
    "REAL"              { addToken(Token.RESERVED_WORD); }
    "SIGN"              { addToken(Token.RESERVED_WORD); }
    "SNGL"              { addToken(Token.RESERVED_WORD); }
    "TRANSFER"          { addToken(Token.RESERVED_WORD); }
    "ZEXT"              { addToken(Token.RESERVED_WORD); }
    "ABS"               { addToken(Token.RESERVED_WORD); }
    "ACOS"              { addToken(Token.RESERVED_WORD); }
    "AIMAG"             { addToken(Token.RESERVED_WORD); }
    "AINT"              { addToken(Token.RESERVED_WORD); }
    "ALOG"              { addToken(Token.RESERVED_WORD); }
    "ALOG10"            { addToken(Token.RESERVED_WORD); }
    "AMAX0"             { addToken(Token.RESERVED_WORD); }
    "AMAX1"             { addToken(Token.RESERVED_WORD); }
    "AMIN0"             { addToken(Token.RESERVED_WORD); }
    "AMIN1"             { addToken(Token.RESERVED_WORD); }
    "AMOD"              { addToken(Token.RESERVED_WORD); }
    "ANINT"             { addToken(Token.RESERVED_WORD); }
    "ASIN"              { addToken(Token.RESERVED_WORD); }
    "ATAN"              { addToken(Token.RESERVED_WORD); }
    "ATAN2"             { addToken(Token.RESERVED_WORD); }
    "CABS"              { addToken(Token.RESERVED_WORD); }
    "CCOS"              { addToken(Token.RESERVED_WORD); }
    "CHAR"              { addToken(Token.RESERVED_WORD); }
    "CLOG"              { addToken(Token.RESERVED_WORD); }
    "CMPLX"             { addToken(Token.RESERVED_WORD); }
    "CONJG"             { addToken(Token.RESERVED_WORD); }
    "COS"               { addToken(Token.RESERVED_WORD); }
    "COSH"              { addToken(Token.RESERVED_WORD); }
    "CSIN"              { addToken(Token.RESERVED_WORD); }
    "CSQRT"             { addToken(Token.RESERVED_WORD); }
    "DABS"              { addToken(Token.RESERVED_WORD); }
    "DACOS"             { addToken(Token.RESERVED_WORD); }
    "DASIN"             { addToken(Token.RESERVED_WORD); }
    "DATAN"             { addToken(Token.RESERVED_WORD); }
    "DATAN2"            { addToken(Token.RESERVED_WORD); }
    "DBLE"              { addToken(Token.RESERVED_WORD); }
    "DCOS"              { addToken(Token.RESERVED_WORD); }
    "DCOSH"             { addToken(Token.RESERVED_WORD); }
    "DDIM"              { addToken(Token.RESERVED_WORD); }
    "DEXP"              { addToken(Token.RESERVED_WORD); }
    "DIM"               { addToken(Token.RESERVED_WORD); }
    "DINT"              { addToken(Token.RESERVED_WORD); }
    "DLOG"              { addToken(Token.RESERVED_WORD); }
    "DLOG10"            { addToken(Token.RESERVED_WORD); }
    "DMAX1"             { addToken(Token.RESERVED_WORD); }
    "DMIN1"             { addToken(Token.RESERVED_WORD); }
    "DMOD"              { addToken(Token.RESERVED_WORD); }
    "DNINT"             { addToken(Token.RESERVED_WORD); }
    "DPROD"             { addToken(Token.RESERVED_WORD); }
    "DREAL"             { addToken(Token.RESERVED_WORD); }
    "DSIGN"             { addToken(Token.RESERVED_WORD); }
    "DSIN"              { addToken(Token.RESERVED_WORD); }
    "DSINH"             { addToken(Token.RESERVED_WORD); }
    "DSQRT"             { addToken(Token.RESERVED_WORD); }
    "DTAN"              { addToken(Token.RESERVED_WORD); }
    "DTANH"             { addToken(Token.RESERVED_WORD); }
    "EXP"               { addToken(Token.RESERVED_WORD); }
    "FLOAT"             { addToken(Token.RESERVED_WORD); }
    "IABS"              { addToken(Token.RESERVED_WORD); }
    "ICHAR"             { addToken(Token.RESERVED_WORD); }
    "IDIM"              { addToken(Token.RESERVED_WORD); }
    "IDINT"             { addToken(Token.RESERVED_WORD); }
    "IDNINT"            { addToken(Token.RESERVED_WORD); }
    "IFIX"              { addToken(Token.RESERVED_WORD); }
    "INDEX"             { addToken(Token.RESERVED_WORD); }
    "INT"               { addToken(Token.RESERVED_WORD); }
    "ISIGN"             { addToken(Token.RESERVED_WORD); }
    "LEN"               { addToken(Token.RESERVED_WORD); }
    "LGE"               { addToken(Token.RESERVED_WORD); }
    "LGT"               { addToken(Token.RESERVED_WORD); }
    "LLE"               { addToken(Token.RESERVED_WORD); }
    "LLT"               { addToken(Token.RESERVED_WORD); }
    "LOG"               { addToken(Token.RESERVED_WORD); }
    "LOG10"             { addToken(Token.RESERVED_WORD); }
    "MAX"               { addToken(Token.RESERVED_WORD); }
    "MAX0"              { addToken(Token.RESERVED_WORD); }
    "MAX1"              { addToken(Token.RESERVED_WORD); }
    "MIN"               { addToken(Token.RESERVED_WORD); }
    "MIN0"              { addToken(Token.RESERVED_WORD); }
    "MIN1"              { addToken(Token.RESERVED_WORD); }
    "MOD"               { addToken(Token.RESERVED_WORD); }
    "NINT"              { addToken(Token.RESERVED_WORD); }
    "REAL"              { addToken(Token.RESERVED_WORD); }
    "SIGN"              { addToken(Token.RESERVED_WORD); }
    "SIN"               { addToken(Token.RESERVED_WORD); }
    "SINH"              { addToken(Token.RESERVED_WORD); }
    "SNGL"              { addToken(Token.RESERVED_WORD); }
    "SQRT"              { addToken(Token.RESERVED_WORD); }
    "TAN"               { addToken(Token.RESERVED_WORD); }
    "TANH"              { addToken(Token.RESERVED_WORD); }

<YYINITIAL> {

    {LineTerminator}            { addNullToken(); return firstToken; }

    {WhiteSpace}+               { addToken(Token.WHITESPACE); }

    /* String/Character Literals. */
    {CharDelimiter}             { start = zzMarkedPos-1; yybegin(CHAR); }
    {StringDelimiter}           { start = zzMarkedPos-1; yybegin(STRING); }

    /* Comment Literals. */
    /* Note that we cannot combine these as JFlex doesn't like combining an */
    /* expression containing the beginning-of-line character '^'. */
    {Column1CommentBegin}       {
                                    // Since we change zzStartRead, we have the unfortunate
                                    // side-effect of not being able to use the '^' operator.
                                    // So we must check whether we're really at the beginning
                                    // of the line ourselves...
                                    if (zzStartRead==s.offset) {
                                        addToken(zzStartRead,zzEndRead, Token.COMMENT_EOL);
                                        addNullToken();
                                        return firstToken;
                                    }
                                    else {
                                        addToken(Token.IDENTIFIER);
                                    }
                                }
    {Column1Comment2Begin}      {
                                    // Since we change zzStartRead, we have the unfortunate
                                    // side-effect of not being able to use the '^' operator.
                                    // So we must check whether we're really at the beginning
                                    // of the line ourselves...
                                    if (zzStartRead==s.offset) {
                                        addToken(zzStartRead,zzEndRead, Token.COMMENT_DOCUMENTATION);
                                        addNullToken();
                                        return firstToken;
                                    }
                                    else {
                                        addToken(Token.IDENTIFIER);
                                    }
                                }
    {AnywhereCommentBegin}      { addToken(zzStartRead,zzEndRead, Token.COMMENT_EOL); addNullToken(); return firstToken; }

    /* Operators. */
    {Operator}                  { addToken(Token.OPERATOR); }

    /* Boolean literals. */
    {Boolean}                   { addToken(Token.LITERAL_BOOLEAN); }

    {Identifier}                { addToken(Token.IDENTIFIER); }

    /* Ended with a line not in a string or char literal. */
    <<EOF>>                     { addNullToken(); return firstToken; }

    /* Catch any other (unhandled) characters. */
    .                           { addToken(Token.IDENTIFIER); }

}

<CHAR> {
    [^\'\n]*                    {}
    \'                          { yybegin(YYINITIAL); addToken(start,zzStartRead, Token.LITERAL_CHAR); }
    \n                          { addToken(start,zzStartRead-1, Token.LITERAL_CHAR); return firstToken; }
    <<EOF>>                     { addToken(start,zzStartRead-1, Token.LITERAL_CHAR); return firstToken; }
}

<STRING> {
    [^\"\n]*                    {}
    \"                          { yybegin(YYINITIAL); addToken(start,zzStartRead, Token.LITERAL_STRING_DOUBLE_QUOTE); }
    \n                          { addToken(start,zzStartRead-1, Token.LITERAL_STRING_DOUBLE_QUOTE); return firstToken; }
    <<EOF>>                     { addToken(start,zzStartRead-1, Token.LITERAL_STRING_DOUBLE_QUOTE); return firstToken; }
}
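
/*
 * Illustrative fixed-form Fortran input and how the rules above classify it
 * (explanatory comment only; it has no effect on the generated scanner):
 *
 * C  A 'C' (or '*') in column 1 turns the entire line into COMMENT_EOL.
 * D  A 'D' in column 1 turns the entire line into COMMENT_DOCUMENTATION.
 *       x = 1                ! Everything from the '!' onward is COMMENT_EOL.
 *       IF (x .lt. y) done = .true.
 *
 * In the last line, 'IF' is a RESERVED_WORD, '.lt.' an OPERATOR, '.true.' a
 * LITERAL_BOOLEAN, and 'x', 'y' and 'done' are IDENTIFIERs.
 */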