upgrade to most recent bluemonday (#11007)
* upgrade to most recent bluemonday
* make vendor
* update tests for bluemonday

tokarchuk/v1.17

parent 4c54477bb5
commit d00ebf445b
@@ -0,0 +1,22 @@
The MIT License (MIT)

Copyright (c) 2015 Aymerick JEHANNE

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
@@ -0,0 +1,60 @@
package css

import "fmt"

// Declaration represents a parsed style property
type Declaration struct {
	Property  string
	Value     string
	Important bool
}

// NewDeclaration instantiates a new Declaration
func NewDeclaration() *Declaration {
	return &Declaration{}
}

// String returns the string representation of the Declaration
func (decl *Declaration) String() string {
	return decl.StringWithImportant(true)
}

// StringWithImportant returns the string representation with an optional !important part
func (decl *Declaration) StringWithImportant(option bool) string {
	result := fmt.Sprintf("%s: %s", decl.Property, decl.Value)

	if option && decl.Important {
		result += " !important"
	}

	result += ";"

	return result
}

// Equal returns true if both Declarations are equal
func (decl *Declaration) Equal(other *Declaration) bool {
	return (decl.Property == other.Property) && (decl.Value == other.Value) && (decl.Important == other.Important)
}

//
// DeclarationsByProperty
//

// DeclarationsByProperty represents sortable style declarations
type DeclarationsByProperty []*Declaration

// Len implements sort.Interface
func (declarations DeclarationsByProperty) Len() int {
	return len(declarations)
}

// Swap implements sort.Interface
func (declarations DeclarationsByProperty) Swap(i, j int) {
	declarations[i], declarations[j] = declarations[j], declarations[i]
}

// Less implements sort.Interface
func (declarations DeclarationsByProperty) Less(i, j int) bool {
	return declarations[i].Property < declarations[j].Property
}
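Taken together, these types make style declarations printable, comparable, and sortable. A minimal sketch of how they behave, assuming the upstream import path github.com/aymerick/douceur/css (the vendored copy in this PR may live under a different path):

```go
package main

import (
	"fmt"
	"sort"

	"github.com/aymerick/douceur/css"
)

func main() {
	bold := css.NewDeclaration()
	bold.Property = "font-weight"
	bold.Value = "bold"
	bold.Important = true

	color := &css.Declaration{Property: "color", Value: "red"}

	decls := css.DeclarationsByProperty{bold, color}
	sort.Sort(decls) // sorts by Property: "color" before "font-weight"

	for _, d := range decls {
		fmt.Println(d.String())
	}
	// Output:
	// color: red;
	// font-weight: bold !important;
}
```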
@@ -0,0 +1,230 @@
package css

import (
	"fmt"
	"strings"
)

const (
	indentSpace = 2
)

// RuleKind represents a Rule kind
type RuleKind int

// Rule kinds
const (
	QualifiedRule RuleKind = iota
	AtRule
)

// At Rules that have Rules inside their block instead of Declarations
var atRulesWithRulesBlock = []string{
	"@document", "@font-feature-values", "@keyframes", "@media", "@supports",
}

// Rule represents a parsed CSS rule
type Rule struct {
	Kind RuleKind

	// At Rule name (eg: "@media")
	Name string

	// Raw prelude
	Prelude string

	// Qualified Rule selectors parsed from prelude
	Selectors []string

	// Style properties
	Declarations []*Declaration

	// At Rule embedded rules
	Rules []*Rule

	// Current rule embedding level
	EmbedLevel int
}

// NewRule instantiates a new Rule
func NewRule(kind RuleKind) *Rule {
	return &Rule{
		Kind: kind,
	}
}

// String returns the string representation of a rule kind
func (kind RuleKind) String() string {
	switch kind {
	case QualifiedRule:
		return "Qualified Rule"
	case AtRule:
		return "At Rule"
	default:
		return "WAT"
	}
}

// EmbedsRules returns true if this rule embeds other rules
func (rule *Rule) EmbedsRules() bool {
	if rule.Kind == AtRule {
		for _, atRuleName := range atRulesWithRulesBlock {
			if rule.Name == atRuleName {
				return true
			}
		}
	}

	return false
}

// Equal returns true if both rules are equal
func (rule *Rule) Equal(other *Rule) bool {
	if (rule.Kind != other.Kind) ||
		(rule.Prelude != other.Prelude) ||
		(rule.Name != other.Name) {
		return false
	}

	if (len(rule.Selectors) != len(other.Selectors)) ||
		(len(rule.Declarations) != len(other.Declarations)) ||
		(len(rule.Rules) != len(other.Rules)) {
		return false
	}

	for i, sel := range rule.Selectors {
		if sel != other.Selectors[i] {
			return false
		}
	}

	for i, decl := range rule.Declarations {
		if !decl.Equal(other.Declarations[i]) {
			return false
		}
	}

	for i, rule := range rule.Rules {
		if !rule.Equal(other.Rules[i]) {
			return false
		}
	}

	return true
}

// Diff returns a string representation of the differences between two rules
func (rule *Rule) Diff(other *Rule) []string {
	result := []string{}

	if rule.Kind != other.Kind {
		result = append(result, fmt.Sprintf("Kind: %s | %s", rule.Kind.String(), other.Kind.String()))
	}

	if rule.Prelude != other.Prelude {
		result = append(result, fmt.Sprintf("Prelude: \"%s\" | \"%s\"", rule.Prelude, other.Prelude))
	}

	if rule.Name != other.Name {
		result = append(result, fmt.Sprintf("Name: \"%s\" | \"%s\"", rule.Name, other.Name))
	}

	if len(rule.Selectors) != len(other.Selectors) {
		result = append(result, fmt.Sprintf("Selectors: %v | %v", strings.Join(rule.Selectors, ", "), strings.Join(other.Selectors, ", ")))
	} else {
		for i, sel := range rule.Selectors {
			if sel != other.Selectors[i] {
				result = append(result, fmt.Sprintf("Selector: \"%s\" | \"%s\"", sel, other.Selectors[i]))
			}
		}
	}

	if len(rule.Declarations) != len(other.Declarations) {
		result = append(result, fmt.Sprintf("Declarations Nb: %d | %d", len(rule.Declarations), len(other.Declarations)))
	} else {
		for i, decl := range rule.Declarations {
			if !decl.Equal(other.Declarations[i]) {
				result = append(result, fmt.Sprintf("Declaration: \"%s\" | \"%s\"", decl.String(), other.Declarations[i].String()))
			}
		}
	}

	if len(rule.Rules) != len(other.Rules) {
		result = append(result, fmt.Sprintf("Rules Nb: %d | %d", len(rule.Rules), len(other.Rules)))
	} else {
		for i, rule := range rule.Rules {
			if !rule.Equal(other.Rules[i]) {
				result = append(result, fmt.Sprintf("Rule: \"%s\" | \"%s\"", rule.String(), other.Rules[i].String()))
			}
		}
	}

	return result
}

// String returns the string representation of a rule
func (rule *Rule) String() string {
	result := ""

	if rule.Kind == QualifiedRule {
		for i, sel := range rule.Selectors {
			if i != 0 {
				result += ", "
			}
			result += sel
		}
	} else {
		// AtRule
		result += rule.Name

		if rule.Prelude != "" {
			if result != "" {
				result += " "
			}
			result += rule.Prelude
		}
	}

	if (len(rule.Declarations) == 0) && (len(rule.Rules) == 0) {
		result += ";"
	} else {
		result += " {\n"

		if rule.EmbedsRules() {
			for _, subRule := range rule.Rules {
				result += fmt.Sprintf("%s%s\n", rule.indent(), subRule.String())
			}
		} else {
			for _, decl := range rule.Declarations {
				result += fmt.Sprintf("%s%s\n", rule.indent(), decl.String())
			}
		}

		result += fmt.Sprintf("%s}", rule.indentEndBlock())
	}

	return result
}

// indent returns indentation spaces for declarations and rules
func (rule *Rule) indent() string {
	result := ""

	for i := 0; i < ((rule.EmbedLevel + 1) * indentSpace); i++ {
		result += " "
	}

	return result
}

// indentEndBlock returns indentation spaces for the end-of-block character
func (rule *Rule) indentEndBlock() string {
	result := ""

	for i := 0; i < (rule.EmbedLevel * indentSpace); i++ {
		result += " "
	}

	return result
}
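For orientation, a small sketch of how Rule.String renders a qualified rule with one declaration (same assumed import path as above):

```go
package main

import (
	"fmt"

	"github.com/aymerick/douceur/css"
)

func main() {
	rule := css.NewRule(css.QualifiedRule)
	rule.Prelude = "p, a"
	rule.Selectors = []string{"p", "a"}
	rule.Declarations = []*css.Declaration{
		{Property: "color", Value: "red"},
	}

	fmt.Println(rule.String())
	// Output:
	// p, a {
	//   color: red;
	// }
}
```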
@@ -0,0 +1,25 @@
package css

// Stylesheet represents a parsed stylesheet
type Stylesheet struct {
	Rules []*Rule
}

// NewStylesheet instantiates a new Stylesheet
func NewStylesheet() *Stylesheet {
	return &Stylesheet{}
}

// String returns the string representation of the Stylesheet
func (sheet *Stylesheet) String() string {
	result := ""

	for _, rule := range sheet.Rules {
		if result != "" {
			result += "\n"
		}
		result += rule.String()
	}

	return result
}
@@ -0,0 +1,22 @@
The MIT License (MIT)

Copyright (c) 2015 Aymerick JEHANNE

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
@@ -0,0 +1,409 @@
package parser

import (
	"errors"
	"fmt"
	"regexp"
	"strings"

	"github.com/gorilla/css/scanner"

	"github.com/aymerick/douceur/css"
)

const (
	importantSuffixRegexp = `(?i)\s*!important\s*$`
)

var (
	importantRegexp *regexp.Regexp
)

// Parser represents a CSS parser
type Parser struct {
	scan *scanner.Scanner // Tokenizer

	// Tokens parsed but not consumed yet
	tokens []*scanner.Token

	// Rule embedding level
	embedLevel int
}

func init() {
	importantRegexp = regexp.MustCompile(importantSuffixRegexp)
}

// NewParser instantiates a new parser
func NewParser(txt string) *Parser {
	return &Parser{
		scan: scanner.New(txt),
	}
}

// Parse parses a whole stylesheet
func Parse(text string) (*css.Stylesheet, error) {
	result, err := NewParser(text).ParseStylesheet()
	if err != nil {
		return nil, err
	}

	return result, nil
}

// ParseDeclarations parses CSS declarations
func ParseDeclarations(text string) ([]*css.Declaration, error) {
	result, err := NewParser(text).ParseDeclarations()
	if err != nil {
		return nil, err
	}

	return result, nil
}

// ParseStylesheet parses a stylesheet
func (parser *Parser) ParseStylesheet() (*css.Stylesheet, error) {
	result := css.NewStylesheet()

	// Parse BOM
	if _, err := parser.parseBOM(); err != nil {
		return result, err
	}

	// Parse list of rules
	rules, err := parser.ParseRules()
	if err != nil {
		return result, err
	}

	result.Rules = rules

	return result, nil
}

// ParseRules parses a list of rules
func (parser *Parser) ParseRules() ([]*css.Rule, error) {
	result := []*css.Rule{}

	inBlock := false
	if parser.tokenChar("{") {
		// parsing a block of rules
		inBlock = true
		parser.embedLevel++

		parser.shiftToken()
	}

	for parser.tokenParsable() {
		if parser.tokenIgnorable() {
			parser.shiftToken()
		} else if parser.tokenChar("}") {
			if !inBlock {
				errMsg := fmt.Sprintf("Unexpected } character: %s", parser.nextToken().String())
				return result, errors.New(errMsg)
			}

			parser.shiftToken()
			parser.embedLevel--

			// finished
			break
		} else {
			rule, err := parser.ParseRule()
			if err != nil {
				return result, err
			}

			rule.EmbedLevel = parser.embedLevel
			result = append(result, rule)
		}
	}

	return result, parser.err()
}

// ParseRule parses a rule
func (parser *Parser) ParseRule() (*css.Rule, error) {
	if parser.tokenAtKeyword() {
		return parser.parseAtRule()
	}

	return parser.parseQualifiedRule()
}

// ParseDeclarations parses a list of declarations
func (parser *Parser) ParseDeclarations() ([]*css.Declaration, error) {
	result := []*css.Declaration{}

	if parser.tokenChar("{") {
		parser.shiftToken()
	}

	for parser.tokenParsable() {
		if parser.tokenIgnorable() {
			parser.shiftToken()
		} else if parser.tokenChar("}") {
			// end of block
			parser.shiftToken()
			break
		} else {
			declaration, err := parser.ParseDeclaration()
			if err != nil {
				return result, err
			}

			result = append(result, declaration)
		}
	}

	return result, parser.err()
}

// ParseDeclaration parses a declaration
func (parser *Parser) ParseDeclaration() (*css.Declaration, error) {
	result := css.NewDeclaration()
	curValue := ""

	for parser.tokenParsable() {
		if parser.tokenChar(":") {
			result.Property = strings.TrimSpace(curValue)
			curValue = ""

			parser.shiftToken()
		} else if parser.tokenChar(";") || parser.tokenChar("}") {
			if result.Property == "" {
				errMsg := fmt.Sprintf("Unexpected ; character: %s", parser.nextToken().String())
				return result, errors.New(errMsg)
			}

			if importantRegexp.MatchString(curValue) {
				result.Important = true
				curValue = importantRegexp.ReplaceAllString(curValue, "")
			}

			result.Value = strings.TrimSpace(curValue)

			if parser.tokenChar(";") {
				parser.shiftToken()
			}

			// finished
			break
		} else {
			token := parser.shiftToken()
			curValue += token.Value
		}
	}

	// log.Printf("[parsed] Declaration: %s", result.String())

	return result, parser.err()
}

// parseAtRule parses an At Rule
func (parser *Parser) parseAtRule() (*css.Rule, error) {
	// parse rule name (eg: "@import")
	token := parser.shiftToken()

	result := css.NewRule(css.AtRule)
	result.Name = token.Value

	for parser.tokenParsable() {
		if parser.tokenChar(";") {
			parser.shiftToken()

			// finished
			break
		} else if parser.tokenChar("{") {
			if result.EmbedsRules() {
				// parse rules block
				rules, err := parser.ParseRules()
				if err != nil {
					return result, err
				}

				result.Rules = rules
			} else {
				// parse declarations block
				declarations, err := parser.ParseDeclarations()
				if err != nil {
					return result, err
				}

				result.Declarations = declarations
			}

			// finished
			break
		} else {
			// parse prelude
			prelude, err := parser.parsePrelude()
			if err != nil {
				return result, err
			}

			result.Prelude = prelude
		}
	}

	// log.Printf("[parsed] Rule: %s", result.String())

	return result, parser.err()
}

// parseQualifiedRule parses a Qualified Rule
func (parser *Parser) parseQualifiedRule() (*css.Rule, error) {
	result := css.NewRule(css.QualifiedRule)

	for parser.tokenParsable() {
		if parser.tokenChar("{") {
			if result.Prelude == "" {
				errMsg := fmt.Sprintf("Unexpected { character: %s", parser.nextToken().String())
				return result, errors.New(errMsg)
			}

			// parse declarations block
			declarations, err := parser.ParseDeclarations()
			if err != nil {
				return result, err
			}

			result.Declarations = declarations

			// finished
			break
		} else {
			// parse prelude
			prelude, err := parser.parsePrelude()
			if err != nil {
				return result, err
			}

			result.Prelude = prelude
		}
	}

	result.Selectors = strings.Split(result.Prelude, ",")
	for i, sel := range result.Selectors {
		result.Selectors[i] = strings.TrimSpace(sel)
	}

	// log.Printf("[parsed] Rule: %s", result.String())

	return result, parser.err()
}

// parsePrelude parses a rule prelude
func (parser *Parser) parsePrelude() (string, error) {
	result := ""

	for parser.tokenParsable() && !parser.tokenEndOfPrelude() {
		token := parser.shiftToken()
		result += token.Value
	}

	result = strings.TrimSpace(result)

	// log.Printf("[parsed] prelude: %s", result)

	return result, parser.err()
}

// parseBOM parses a Byte Order Mark
func (parser *Parser) parseBOM() (bool, error) {
	if parser.nextToken().Type == scanner.TokenBOM {
		parser.shiftToken()
		return true, nil
	}

	return false, parser.err()
}

// nextToken returns the next token without removing it from the tokens buffer
func (parser *Parser) nextToken() *scanner.Token {
	if len(parser.tokens) == 0 {
		// fetch next token
		nextToken := parser.scan.Next()

		// log.Printf("[token] %s => %v", nextToken.Type.String(), nextToken.Value)

		// queue it
		parser.tokens = append(parser.tokens, nextToken)
	}

	return parser.tokens[0]
}

// shiftToken returns the next token and removes it from the tokens buffer
func (parser *Parser) shiftToken() *scanner.Token {
	var result *scanner.Token

	result, parser.tokens = parser.tokens[0], parser.tokens[1:]
	return result
}

// err returns the tokenizer error, or nil if there is no error
func (parser *Parser) err() error {
	if parser.tokenError() {
		token := parser.nextToken()
		return fmt.Errorf("Tokenizer error: %s", token.String())
	}

	return nil
}

// tokenError returns true if the next token is an Error
func (parser *Parser) tokenError() bool {
	return parser.nextToken().Type == scanner.TokenError
}

// tokenEOF returns true if the next token is an EOF
func (parser *Parser) tokenEOF() bool {
	return parser.nextToken().Type == scanner.TokenEOF
}

// tokenWS returns true if the next token is a whitespace
func (parser *Parser) tokenWS() bool {
	return parser.nextToken().Type == scanner.TokenS
}

// tokenComment returns true if the next token is a comment
func (parser *Parser) tokenComment() bool {
	return parser.nextToken().Type == scanner.TokenComment
}

// tokenCDOorCDC returns true if the next token is a CDO or a CDC
func (parser *Parser) tokenCDOorCDC() bool {
	switch parser.nextToken().Type {
	case scanner.TokenCDO, scanner.TokenCDC:
		return true
	default:
		return false
	}
}

// tokenIgnorable returns true if the next token is ignorable
func (parser *Parser) tokenIgnorable() bool {
	return parser.tokenWS() || parser.tokenComment() || parser.tokenCDOorCDC()
}

// tokenParsable returns true if the next token is parsable
func (parser *Parser) tokenParsable() bool {
	return !parser.tokenEOF() && !parser.tokenError()
}

// tokenAtKeyword returns true if the next token is an At Rule keyword
func (parser *Parser) tokenAtKeyword() bool {
	return parser.nextToken().Type == scanner.TokenAtKeyword
}

// tokenChar returns true if the next token is the given character
func (parser *Parser) tokenChar(value string) bool {
	token := parser.nextToken()
	return (token.Type == scanner.TokenChar) && (token.Value == value)
}

// tokenEndOfPrelude returns true if the next token marks the end of a prelude
func (parser *Parser) tokenEndOfPrelude() bool {
	return parser.tokenChar(";") || parser.tokenChar("{")
}
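To see the parser end to end, here is a minimal sketch that parses a stylesheet and prints it back, assuming the upstream import path github.com/aymerick/douceur/parser:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aymerick/douceur/parser"
)

func main() {
	sheet, err := parser.Parse(`@media screen { p { color: red !important; } }`)
	if err != nil {
		log.Fatal(err)
	}

	// The @media At Rule embeds one qualified rule with one declaration.
	fmt.Println(sheet.String())
	// @media screen {
	//   p {
	//     color: red !important;
	//   }
	// }
}
```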
@@ -0,0 +1,27 @@
Copyright (c) 2013, Gorilla web toolkit
All rights reserved.

Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:

Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.

Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.

Neither the name of the {organization} nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@@ -0,0 +1,33 @@
// Copyright 2012 The Gorilla Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

/*
Package gorilla/css/scanner generates tokens for a CSS3 input.

It follows the CSS3 specification located at:

	http://www.w3.org/TR/css3-syntax/

To use it, create a new scanner for a given CSS string and call Next() until
the token returned has type TokenEOF or TokenError:

	s := scanner.New(myCSS)
	for {
		token := s.Next()
		if token.Type == scanner.TokenEOF || token.Type == scanner.TokenError {
			break
		}
		// Do something with the token...
	}

Following the CSS3 specification, an error can only occur when the scanner
finds an unclosed quote or unclosed comment. In these cases the text becomes
"untokenizable". Everything else is tokenizable and it is up to a parser
to make sense of the token stream (or ignore nonsensical token sequences).

Note: the scanner doesn't perform lexical analysis or, in other words, it
doesn't care about the token context. It is intended to be used by a
lexer or parser.
*/
package scanner
@@ -0,0 +1,356 @@
// Copyright 2012 The Gorilla Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package scanner

import (
	"fmt"
	"regexp"
	"strings"
	"unicode"
	"unicode/utf8"
)

// tokenType identifies the type of lexical tokens.
type tokenType int

// String returns a string representation of the token type.
func (t tokenType) String() string {
	return tokenNames[t]
}

// Token represents a token and the corresponding string.
type Token struct {
	Type   tokenType
	Value  string
	Line   int
	Column int
}

// String returns a string representation of the token.
func (t *Token) String() string {
	if len(t.Value) > 10 {
		return fmt.Sprintf("%s (line: %d, column: %d): %.10q...",
			t.Type, t.Line, t.Column, t.Value)
	}
	return fmt.Sprintf("%s (line: %d, column: %d): %q",
		t.Type, t.Line, t.Column, t.Value)
}

// All tokens -----------------------------------------------------------------

// The complete list of tokens in CSS3.
const (
	// Scanner flags.
	TokenError tokenType = iota
	TokenEOF
	// From now on, only tokens from the CSS specification.
	TokenIdent
	TokenAtKeyword
	TokenString
	TokenHash
	TokenNumber
	TokenPercentage
	TokenDimension
	TokenURI
	TokenUnicodeRange
	TokenCDO
	TokenCDC
	TokenS
	TokenComment
	TokenFunction
	TokenIncludes
	TokenDashMatch
	TokenPrefixMatch
	TokenSuffixMatch
	TokenSubstringMatch
	TokenChar
	TokenBOM
)

// tokenNames maps tokenType's to their names. Used for conversion to string.
var tokenNames = map[tokenType]string{
	TokenError:          "error",
	TokenEOF:            "EOF",
	TokenIdent:          "IDENT",
	TokenAtKeyword:      "ATKEYWORD",
	TokenString:         "STRING",
	TokenHash:           "HASH",
	TokenNumber:         "NUMBER",
	TokenPercentage:     "PERCENTAGE",
	TokenDimension:      "DIMENSION",
	TokenURI:            "URI",
	TokenUnicodeRange:   "UNICODE-RANGE",
	TokenCDO:            "CDO",
	TokenCDC:            "CDC",
	TokenS:              "S",
	TokenComment:        "COMMENT",
	TokenFunction:       "FUNCTION",
	TokenIncludes:       "INCLUDES",
	TokenDashMatch:      "DASHMATCH",
	TokenPrefixMatch:    "PREFIXMATCH",
	TokenSuffixMatch:    "SUFFIXMATCH",
	TokenSubstringMatch: "SUBSTRINGMATCH",
	TokenChar:           "CHAR",
	TokenBOM:            "BOM",
}

// Macros and productions -----------------------------------------------------
// http://www.w3.org/TR/css3-syntax/#tokenization

var macroRegexp = regexp.MustCompile(`\{[a-z]+\}`)

// macros maps macro names to patterns to be expanded.
var macros = map[string]string{
	// must be escaped: `\.+*?()|[]{}^$`
	"ident":      `-?{nmstart}{nmchar}*`,
	"name":       `{nmchar}+`,
	"nmstart":    `[a-zA-Z_]|{nonascii}|{escape}`,
	"nonascii":   "[\u0080-\uD7FF\uE000-\uFFFD\U00010000-\U0010FFFF]",
	"unicode":    `\\[0-9a-fA-F]{1,6}{wc}?`,
	"escape":     "{unicode}|\\\\[\u0020-\u007E\u0080-\uD7FF\uE000-\uFFFD\U00010000-\U0010FFFF]",
	"nmchar":     `[a-zA-Z0-9_-]|{nonascii}|{escape}`,
	"num":        `[0-9]*\.[0-9]+|[0-9]+`,
	"string":     `"(?:{stringchar}|')*"|'(?:{stringchar}|")*'`,
	"stringchar": `{urlchar}|[ ]|\\{nl}`,
	"nl":         `[\n\r\f]|\r\n`,
	"w":          `{wc}*`,
	"wc":         `[\t\n\f\r ]`,

	// urlchar should accept [(ascii characters minus those that need escaping)|{nonascii}|{escape}]
	// ASCII characters range = `[\u0020-\u007e]`
	// Skip space \u0020 = `[\u0021-\u007e]`
	// Skip quotation mark \0022 = `[\u0021\u0023-\u007e]`
	// Skip apostrophe \u0027 = `[\u0021\u0023-\u0026\u0028-\u007e]`
	// Skip reverse solidus \u005c = `[\u0021\u0023-\u0026\u0028-\u005b\u005d\u007e]`
	// Finally, the left square bracket (\u005b) and right (\u005d) needs escaping themselves
	"urlchar": "[\u0021\u0023-\u0026\u0028-\\\u005b\\\u005d-\u007E]|{nonascii}|{escape}",
}

// productions maps the list of tokens to patterns to be expanded.
var productions = map[tokenType]string{
	// Unused regexps (matched using other methods) are commented out.
	TokenIdent:        `{ident}`,
	TokenAtKeyword:    `@{ident}`,
	TokenString:       `{string}`,
	TokenHash:         `#{name}`,
	TokenNumber:       `{num}`,
	TokenPercentage:   `{num}%`,
	TokenDimension:    `{num}{ident}`,
	TokenURI:          `url\({w}(?:{string}|{urlchar}*?){w}\)`,
	TokenUnicodeRange: `U\+[0-9A-F\?]{1,6}(?:-[0-9A-F]{1,6})?`,
	//TokenCDO:        `<!--`,
	TokenCDC:      `-->`,
	TokenS:        `{wc}+`,
	TokenComment:  `/\*[^\*]*[\*]+(?:[^/][^\*]*[\*]+)*/`,
	TokenFunction: `{ident}\(`,
	//TokenIncludes:       `~=`,
	//TokenDashMatch:      `\|=`,
	//TokenPrefixMatch:    `\^=`,
	//TokenSuffixMatch:    `\$=`,
	//TokenSubstringMatch: `\*=`,
	//TokenChar:           `[^"']`,
	//TokenBOM:            "\uFEFF",
}

// matchers maps the list of tokens to compiled regular expressions.
//
// The map is filled on init() using the macros and productions defined in
// the CSS specification.
var matchers = map[tokenType]*regexp.Regexp{}

// matchOrder is the order to test regexps when first-char shortcuts
// can't be used.
var matchOrder = []tokenType{
	TokenURI,
	TokenFunction,
	TokenUnicodeRange,
	TokenIdent,
	TokenDimension,
	TokenPercentage,
	TokenNumber,
	TokenCDC,
}

func init() {
	// replace macros and compile regexps for productions.
	replaceMacro := func(s string) string {
		return "(?:" + macros[s[1:len(s)-1]] + ")"
	}
	for t, s := range productions {
		for macroRegexp.MatchString(s) {
			s = macroRegexp.ReplaceAllStringFunc(s, replaceMacro)
		}
		matchers[t] = regexp.MustCompile("^(?:" + s + ")")
	}
}

// Scanner --------------------------------------------------------------------

// New returns a new CSS scanner for the given input.
func New(input string) *Scanner {
	// Normalize newlines.
	input = strings.Replace(input, "\r\n", "\n", -1)
	return &Scanner{
		input: input,
		row:   1,
		col:   1,
	}
}

// Scanner scans an input and emits tokens following the CSS3 specification.
type Scanner struct {
	input string
	pos   int
	row   int
	col   int
	err   *Token
}

// Next returns the next token from the input.
//
// At the end of the input the token type is TokenEOF.
//
// If the input can't be tokenized the token type is TokenError. This occurs
// in case of unclosed quotation marks or comments.
func (s *Scanner) Next() *Token {
	if s.err != nil {
		return s.err
	}
	if s.pos >= len(s.input) {
		s.err = &Token{TokenEOF, "", s.row, s.col}
		return s.err
	}
	if s.pos == 0 {
		// Test BOM only once, at the beginning of the file.
		if strings.HasPrefix(s.input, "\uFEFF") {
			return s.emitSimple(TokenBOM, "\uFEFF")
		}
	}
	// There's a lot we can guess based on the first byte so we'll take a
	// shortcut before testing multiple regexps.
	input := s.input[s.pos:]
	switch input[0] {
	case '\t', '\n', '\f', '\r', ' ':
		// Whitespace.
		return s.emitToken(TokenS, matchers[TokenS].FindString(input))
	case '.':
		// Dot is too common to not have a quick check.
		// We'll test if this is a Char; if it is followed by a number it is a
		// dimension/percentage/number, and this will be matched later.
		if len(input) > 1 && !unicode.IsDigit(rune(input[1])) {
			return s.emitSimple(TokenChar, ".")
		}
	case '#':
		// Another common one: Hash or Char.
		if match := matchers[TokenHash].FindString(input); match != "" {
			return s.emitToken(TokenHash, match)
		}
		return s.emitSimple(TokenChar, "#")
	case '@':
		// Another common one: AtKeyword or Char.
		if match := matchers[TokenAtKeyword].FindString(input); match != "" {
			return s.emitSimple(TokenAtKeyword, match)
		}
		return s.emitSimple(TokenChar, "@")
	case ':', ',', ';', '%', '&', '+', '=', '>', '(', ')', '[', ']', '{', '}':
		// More common chars.
		return s.emitSimple(TokenChar, string(input[0]))
	case '"', '\'':
		// String or error.
		match := matchers[TokenString].FindString(input)
		if match != "" {
			return s.emitToken(TokenString, match)
		}

		s.err = &Token{TokenError, "unclosed quotation mark", s.row, s.col}
		return s.err
	case '/':
		// Comment, error or Char.
		if len(input) > 1 && input[1] == '*' {
			match := matchers[TokenComment].FindString(input)
			if match != "" {
				return s.emitToken(TokenComment, match)
			} else {
				s.err = &Token{TokenError, "unclosed comment", s.row, s.col}
				return s.err
			}
		}
		return s.emitSimple(TokenChar, "/")
	case '~':
		// Includes or Char.
		return s.emitPrefixOrChar(TokenIncludes, "~=")
	case '|':
		// DashMatch or Char.
		return s.emitPrefixOrChar(TokenDashMatch, "|=")
	case '^':
		// PrefixMatch or Char.
		return s.emitPrefixOrChar(TokenPrefixMatch, "^=")
	case '$':
		// SuffixMatch or Char.
		return s.emitPrefixOrChar(TokenSuffixMatch, "$=")
	case '*':
		// SubstringMatch or Char.
		return s.emitPrefixOrChar(TokenSubstringMatch, "*=")
	case '<':
		// CDO or Char.
		return s.emitPrefixOrChar(TokenCDO, "<!--")
	}
	// Test all regexps, in order.
	for _, token := range matchOrder {
		if match := matchers[token].FindString(input); match != "" {
			return s.emitToken(token, match)
		}
	}
	// We already handled unclosed quotation marks and comments,
	// so this can only be a Char.
	r, width := utf8.DecodeRuneInString(input)
	token := &Token{TokenChar, string(r), s.row, s.col}
	s.col += width
	s.pos += width
	return token
}

// updatePosition updates input coordinates based on the consumed text.
func (s *Scanner) updatePosition(text string) {
	width := utf8.RuneCountInString(text)
	lines := strings.Count(text, "\n")
	s.row += lines
	if lines == 0 {
		s.col += width
	} else {
		s.col = utf8.RuneCountInString(text[strings.LastIndex(text, "\n"):])
	}
	s.pos += len(text) // while col is a rune index, pos is a byte index
}

// emitToken returns a Token for the string v and updates the scanner position.
func (s *Scanner) emitToken(t tokenType, v string) *Token {
	token := &Token{t, v, s.row, s.col}
	s.updatePosition(v)
	return token
}

// emitSimple returns a Token for the string v and updates the scanner
// position in a simplified manner.
//
// The string is known to have only ASCII characters and to not have a newline.
func (s *Scanner) emitSimple(t tokenType, v string) *Token {
	token := &Token{t, v, s.row, s.col}
	s.col += len(v)
	s.pos += len(v)
	return token
}

// emitPrefixOrChar returns a Token for type t if the current position
// matches the given prefix. Otherwise it returns a Char token using the
// first character from the prefix.
//
// The prefix is known to have only ASCII characters and to not have a newline.
func (s *Scanner) emitPrefixOrChar(t tokenType, prefix string) *Token {
	if strings.HasPrefix(s.input[s.pos:], prefix) {
		return s.emitSimple(t, prefix)
	}
	return s.emitSimple(TokenChar, string(prefix[0]))
}
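As a quick illustration of the emit helpers above, a sketch that tokenizes a short rule and prints each token with the position information that Token.String() carries:

```go
package main

import (
	"fmt"

	"github.com/gorilla/css/scanner"
)

func main() {
	s := scanner.New("a { color: #fff }")
	for {
		token := s.Next()
		if token.Type == scanner.TokenEOF || token.Type == scanner.TokenError {
			break
		}
		// Token.String() includes the token type name, line, and column.
		fmt.Println(token.String())
	}
}
```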
@@ -1,22 +1,15 @@
-# Compiled Object files, Static and Dynamic libs (Shared Objects)
-*.o
-*.a
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
 *.so
+*.dylib

-# Folders
-_obj
-_test
-
-# Architecture specific extensions/prefixes
-*.[568vq]
-[568vq].out
+# Test binary, built with `go test -c`
+*.test

-*.cgo1.go
-*.cgo2.c
-_cgo_defun.c
-_cgo_gotypes.go
-_cgo_export.*
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out

-_testmain.go
-
-*.exe
+# goland idea folder
+*.idea
@ -1,18 +1,22 @@ |
||||
language: go |
||||
go: |
||||
- 1.1 |
||||
- 1.2 |
||||
- 1.3 |
||||
- 1.4 |
||||
- 1.5 |
||||
- 1.6 |
||||
- 1.7 |
||||
- 1.2.x |
||||
- 1.3.x |
||||
- 1.4.x |
||||
- 1.5.x |
||||
- 1.6.x |
||||
- 1.7.x |
||||
- 1.8.x |
||||
- 1.9.x |
||||
- 1.10.x |
||||
- 1.11.x |
||||
- 1.12.x |
||||
- tip |
||||
matrix: |
||||
allow_failures: |
||||
- go: tip |
||||
fast_finish: true |
||||
install: |
||||
- go get golang.org/x/net/html |
||||
- go get . |
||||
script: |
||||
- go test -v ./... |
||||
|
@ -1,6 +1,7 @@ |
||||
|
||||
1. John Graham-Cumming http://jgc.org/ |
||||
1. Mohammad Gufran https://github.com/Gufran |
||||
1. Steven Gutzwiller https://github.com/StevenGutzwiller |
||||
1. Andrew Krasichkov @buglloc https://github.com/buglloc |
||||
1. Mike Samuel mikesamuel@gmail.com |
||||
1. Dmitri Shuralyov shurcooL@gmail.com |
||||
1. https://github.com/opennota |
||||
1. https://github.com/Gufran |
@@ -0,0 +1,10 @@
module github.com/microcosm-cc/bluemonday

go 1.9

require (
	github.com/aymerick/douceur v0.2.0 // indirect
	github.com/chris-ramon/douceur v0.2.0
	github.com/gorilla/css v1.0.0 // indirect
	golang.org/x/net v0.0.0-20181220203305-927f97764cc3
)
@@ -0,0 +1,8 @@
github.com/aymerick/douceur v0.2.0 h1:Mv+mAeH1Q+n9Fr+oyamOlAkUNPWPlA8PPGR0QAaYuPk=
github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4=
github.com/chris-ramon/douceur v0.2.0 h1:IDMEdxlEUUBYBKE4z/mJnFyVXox+MjuEVDJNN27glkU=
github.com/chris-ramon/douceur v0.2.0/go.mod h1:wDW5xjJdeoMm1mRt4sD4c/LbF/mWdEpRXQKjTR8nIBE=
github.com/gorilla/css v1.0.0 h1:BQqNyPTi50JCFMTw/b67hByjMVXZRwGha6wxVGkeihY=
github.com/gorilla/css v1.0.0/go.mod h1:Dn721qIggHpt4+EFCcTLTU/vk5ySda2ReITrtgBl60c=
golang.org/x/net v0.0.0-20181220203305-927f97764cc3 h1:eH6Eip3UpmR+yM/qI9Ijluzb1bNv/cAU/n+6l8tRSis=
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
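Functionally, nothing changes for callers of bluemonday after the bump; a minimal sanity check of the upgraded dependency might look like the sketch below (the exact sanitized output depends on the chosen policy, so treat the comment as indicative):

```go
package main

import (
	"fmt"

	"github.com/microcosm-cc/bluemonday"
)

func main() {
	// UGCPolicy allows common user-generated-content markup while
	// stripping dangerous attributes such as javascript: URLs.
	p := bluemonday.UGCPolicy()

	dirty := `<a href="javascript:alert(1)" onclick="evil()">click</a><p>hello</p>`
	fmt.Println(p.Sanitize(dirty))
	// The javascript: href and the onclick handler are removed;
	// the <p> element is preserved.
}
```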
File diff suppressed because it is too large
@@ -0,0 +1,17 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Constants that were deprecated or moved to enums in the FreeBSD headers. Keep
// them here for backwards compatibility.

package unix

const (
	DLT_HHDLC            = 0x79
	IPV6_MIN_MEMBERSHIPS = 0x1f
	IP_MAX_SOURCE_FILTER = 0x400
	IP_MIN_MEMBERSHIPS   = 0x1f
	RT_CACHING_CONTEXT   = 0x1
	RT_NORTREF           = 0x2
)
@@ -0,0 +1,57 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// illumos system calls not present on Solaris.

// +build amd64,illumos

package unix

import "unsafe"

func bytes2iovec(bs [][]byte) []Iovec {
	iovecs := make([]Iovec, len(bs))
	for i, b := range bs {
		iovecs[i].SetLen(len(b))
		if len(b) > 0 {
			// somehow Iovec.Base on illumos is (*int8), not (*byte)
			iovecs[i].Base = (*int8)(unsafe.Pointer(&b[0]))
		} else {
			iovecs[i].Base = (*int8)(unsafe.Pointer(&_zero))
		}
	}
	return iovecs
}

//sys	readv(fd int, iovs []Iovec) (n int, err error)

func Readv(fd int, iovs [][]byte) (n int, err error) {
	iovecs := bytes2iovec(iovs)
	n, err = readv(fd, iovecs)
	return n, err
}

//sys	preadv(fd int, iovs []Iovec, off int64) (n int, err error)

func Preadv(fd int, iovs [][]byte, off int64) (n int, err error) {
	iovecs := bytes2iovec(iovs)
	n, err = preadv(fd, iovecs, off)
	return n, err
}

//sys	writev(fd int, iovs []Iovec) (n int, err error)

func Writev(fd int, iovs [][]byte) (n int, err error) {
	iovecs := bytes2iovec(iovs)
	n, err = writev(fd, iovecs)
	return n, err
}

//sys	pwritev(fd int, iovs []Iovec, off int64) (n int, err error)

func Pwritev(fd int, iovs [][]byte, off int64) (n int, err error) {
	iovecs := bytes2iovec(iovs)
	n, err = pwritev(fd, iovecs, off)
	return n, err
}
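These wrappers expose vectored I/O to Go callers on illumos. A sketch of how a caller would use them; this only builds for GOOS=illumos, and the file path is illustrative:

```go
// +build illumos

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	fd, err := unix.Open("/tmp/vectored.txt", unix.O_CREAT|unix.O_WRONLY|unix.O_TRUNC, 0644)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(fd)

	// Writev gathers the two buffers into a single write syscall.
	n, err := unix.Writev(fd, [][]byte{[]byte("hello, "), []byte("illumos\n")})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("wrote", n, "bytes")
}
```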
@@ -0,0 +1,87 @@
// go run mksyscall_solaris.go -illumos -tags illumos,amd64 syscall_illumos.go
// Code generated by the command above; see README.md. DO NOT EDIT.

// +build illumos,amd64

package unix

import (
	"unsafe"
)

//go:cgo_import_dynamic libc_readv readv "libc.so"
//go:cgo_import_dynamic libc_preadv preadv "libc.so"
//go:cgo_import_dynamic libc_writev writev "libc.so"
//go:cgo_import_dynamic libc_pwritev pwritev "libc.so"

//go:linkname procreadv libc_readv
//go:linkname procpreadv libc_preadv
//go:linkname procwritev libc_writev
//go:linkname procpwritev libc_pwritev

var (
	procreadv,
	procpreadv,
	procwritev,
	procpwritev syscallFunc
)

// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

func readv(fd int, iovs []Iovec) (n int, err error) {
	var _p0 *Iovec
	if len(iovs) > 0 {
		_p0 = &iovs[0]
	}
	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procreadv)), 3, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(iovs)), 0, 0, 0)
	n = int(r0)
	if e1 != 0 {
		err = e1
	}
	return
}

// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

func preadv(fd int, iovs []Iovec, off int64) (n int, err error) {
	var _p0 *Iovec
	if len(iovs) > 0 {
		_p0 = &iovs[0]
	}
	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procpreadv)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(iovs)), uintptr(off), 0, 0)
	n = int(r0)
	if e1 != 0 {
		err = e1
	}
	return
}

// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

func writev(fd int, iovs []Iovec) (n int, err error) {
	var _p0 *Iovec
	if len(iovs) > 0 {
		_p0 = &iovs[0]
	}
	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procwritev)), 3, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(iovs)), 0, 0, 0)
	n = int(r0)
	if e1 != 0 {
		err = e1
	}
	return
}

// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

func pwritev(fd int, iovs []Iovec, off int64) (n int, err error) {
	var _p0 *Iovec
	if len(iovs) > 0 {
		_p0 = &iovs[0]
	}
	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procpwritev)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(iovs)), uintptr(off), 0, 0)
	n = int(r0)
	if e1 != 0 {
		err = e1
	}
	return
}