Explore Public Snippets

Found 24k snippets

    public by Kingsley Huynh  5540  29  5  0

    Golang HTTP Handler to Upload Image => Resize => Convert to JPEG => Save to Disk.

    Golang HTTP Handler to Upload Image => Resize => Convert to JPEG => Save to Disk. : gistfile1.go
    Go
    func UploadHandler(w http.ResponseWriter, r *http.Request) {
    
    	file, _, err := r.FormFile("file")
    	if err != nil {
    		log.Println(err)
    		http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
    		return
    	}
    	// close the uploaded multipart file when the handler returns
    	defer file.Close()
    
    	img, _, err := image.Decode(file)
    	if err != nil {
    		log.Println(err)
    		http.Error(w, http.StatusText(http.StatusUnsupportedMediaType), http.StatusUnsupportedMediaType)
    		return
    	}
    
    	m := resize.Resize(1000, 0, img, resize.Lanczos3)
    
    	out, err := os.Create("test_resized.jpg")
    	if err != nil {
    		log.Println(err)
    		http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
    		return
    	}
    	defer out.Close()
    
    	// Encode into jpeg http://blog.golang.org/go-image-package
    	err = jpeg.Encode(out, m, nil)
    	if err != nil {
    		log.Println(err)
    		http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
    		return
    	}
    }
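
    The handler above is shown without its package clause or imports. A minimal sketch of how it might be wired up, assuming the resize call comes from github.com/nfnt/resize and that the route and port are placeholders:

    package main
    
    import (
    	"image"
    	_ "image/gif" // register extra decoders so image.Decode can sniff uploads
    	"image/jpeg"
    	_ "image/png"
    	"log"
    	"net/http"
    	"os"
    
    	"github.com/nfnt/resize"
    )
    
    func main() {
    	// UploadHandler from the snippet above is assumed to live in this package.
    	http.HandleFunc("/upload", UploadHandler)
    	log.Fatal(http.ListenAndServe(":8080", nil))
    }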
    
    

    external by hewig  3  0  1  0

    call wallet core from go

    call wallet core from go: main.go
    package main
    
    // #cgo CFLAGS: -I${SRCDIR}/wallet-core/include
    // #cgo LDFLAGS: -L${SRCDIR}/wallet-core/build/ios -lTrezorCrypto -lTrustWalletCore -lprotobuf -lc++
    // #include <TrustWalletCore/TWHDWallet.h>
    // #include <TrustWalletCore/TWString.h>
    import "C"
    
    import "fmt"
    
    func main() {
    	fmt.Println("==> calling wallet core from go")
    	str := C.TWStringCreateWithUTF8Bytes(C.CString("confirm bleak useless tail chalk destroy horn step bulb genuine attract split"))
    	defer C.TWStringDelete(str)
    	valid := C.TWHDWalletIsValid(str)
    	fmt.Println("<== mnemonic is valid: ", valid)
    }
    
    
    

    external by ethicalvats  2  0  1  0

    movement in arrays

    movement in arrays: main.go
    package main
    
    import (
    	"bufio"
    	"fmt"
    	"os"
    	"strconv"
    	"strings"
    )
    
    func check(err error){
    	if err != nil{
    		panic(err)
    	}
    }
    
    func process(arr []int, moves int){
    	fmt.Println(arr, moves)
    }
    
    func start(problems []Problem){
    	fmt.Println("")
    	for _, p := range problems{
    		var strToInt []int
    		for _, s := range strings.Split(p.inputs, " "){
    			i, _ := strconv.Atoi(s)
    			strToInt = append(strToInt, i)
    		}
    		process(strToInt, p.moves)
    	}
    }
    
    type Problem struct {
    	inputs string
    	moves int
    }
    
    func main(){
    	var count int
    	var maxCount int
    	var problemIncrementCount = 0
    	var problems []Problem
    	var arrIn string
    	var moves int
    	reader := bufio.NewReader(os.Stdin)
    	for {
    		//fmt.Println(count, maxCount)
    		text, _ := reader.ReadString('\n')
    		if count == 0{
    			str := strings.TrimSuffix(text, "\n")
    			maxCount, _ = strconv.Atoi(str)
    		}else{
    			//fmt.Print("Enter text: ")
    			if problemIncrementCount >= 3{
    				problemIncrementCount = 0
    				arrIn = ""
    				moves = 0
    			}
    
    			if problemIncrementCount == 1{
    				arrIn = strings.TrimSuffix(text, "\n")
    			}else if problemIncrementCount == 2 {
    				moves, _ = strconv.Atoi(strings.TrimSuffix(text, "\n"))
    			}
    
    			problemIncrementCount++
    
    			if arrIn != "" && moves != 0{
    				problem := Problem{
    					inputs: arrIn,
    					moves:  moves,
    				}
    				problems = append(problems, problem)
    			}
    		}
    		if len(problems) == maxCount{
    			start(problems)
    			break
    		}
    		count ++
    	}
    }
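
    The stdin loop above tracks its position with manual counters; a shorter sketch of the same parsing using bufio.Scanner, assuming each problem spans three lines (a size line that is ignored, the space-separated array, then the number of moves) and reusing the Problem struct and imports from the snippet above:

    // readProblems reads the problem count and then one Problem per
    // three input lines.
    func readProblems() []Problem {
    	sc := bufio.NewScanner(os.Stdin)
    	next := func() string {
    		sc.Scan()
    		return strings.TrimSpace(sc.Text())
    	}
    
    	n, _ := strconv.Atoi(next())
    	problems := make([]Problem, 0, n)
    	for i := 0; i < n; i++ {
    		_ = next() // size line, unused by process
    		inputs := next()
    		moves, _ := strconv.Atoi(next())
    		problems = append(problems, Problem{inputs: inputs, moves: moves})
    	}
    	return problems
    }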
    
    
    

    external by Naoki Matsumoto  3  0  1  0

    SoftEther Wrapper

    SoftEther Wrapper: main.go
    package main
    
    import (
        "flag"
        "fmt"
        "log"
        "os"
        "os/exec"
        "strings"
        "net"
    )
    
    func main(){
        flag.Parse()
        if flag.Arg(0) == "" {
            fmt.Println("Specify Connection Name")
            os.Exit(1)
        }
        out, err := exec.Command("vpncmd", "/client", "localhost", "/cmd", "AccountGet", flag.Arg(0)).Output()
        if err != nil {
            fmt.Printf("接続 %v は存在しません\n", flag.Arg(0))
            log.Fatal(err)
        }
        out_list := strings.Split(string(out), "\n")
        var server_address string = ""
        for i := 0; i < len(out_list); i++ {
            if strings.Index(out_list[i], "VPN Client>AccountGet") != -1 {
                server_address = out_list[i+5]
                break
            }
        }
        server_address = strings.Split(server_address, "|")[1]
        if net.ParseIP(server_address) != nil {
            fmt.Println("IP")
        } else {
            addr, err := net.ResolveIPAddr("ip", server_address)
            if err != nil {
                fmt.Println("Resolve error")
                os.Exit(1)
            }
            fmt.Printf("VPN Server URI: %v\n", server_address)
            server_address = addr.String()
        }
        fmt.Printf("VPN Server IP : %v\n", server_address)
    
        _ = exec.Command("ip", "route", "del", server_address).Run()
        _ = exec.Command("dhcpcd", "-x", "vpn_"+strings.ToLower(flag.Arg(0))).Run()
    
        out, err = exec.Command("sh", "-c", "ip route | grep default").Output()
        if err != nil {
            fmt.Println("Failed to exec ip route")
            log.Fatal(err)
            os.Exit(1)
        }
        if len(out) < 1 {
            fmt.Println("Failed to get default gateway")
            os.Exit(1)
        }
        gateway_string := strings.Split(string(out), "\n")[0]
        gateway_ip := strings.Split(string(gateway_string), " ")[2]
        gateway_dev := strings.Split(string(gateway_string), " ")[4]
        fmt.Printf("Default Gateway IP : %v\n", gateway_ip)
        fmt.Printf("Default Gateway Dev: %v\n", gateway_dev)
        fmt.Printf("Adding VPN Server Route...\n")
        out, err = exec.Command("ip", "route", "add", server_address, "via", gateway_ip, "dev", gateway_dev).Output()
        if err != nil {
            fmt.Println("Failed to exec ip route")
            log.Fatal(err)
            os.Exit(1)
        }
        fmt.Printf("Successfully Added VPN Server Route\n")
        out, err = exec.Command("vpncmd", "/client", "localhost", "/cmd", "AccountConnect", flag.Arg(0)).Output()
        if err != nil {
            fmt.Printf("Failed to Connect\n", flag.Arg(0))
            log.Fatal(err)
        }
    }
    
    
    
    

    external by Cartmanishere  2  0  1  0

    Golang client for grpc-python-golang example

    Golang client for grpc-python-golang example: client.go
    func main() {
        client, err := InitGrpcConnection()
        if err != nil {
            fmt.Println(err)
            os.Exit(0)
        } 
    
        reader := bufio.NewReader(os.Stdin)
        
        for {
            fmt.Println("Enter some text: ")
            text, _ := reader.ReadString('\n') 
            fmt.Println("Keywords:")
            keywords, err := client.MyKeywords(text)
            if err != nil {
                fmt.Println(err)
                continue
            }
            fmt.Println(keywords)
            fmt.Println()
        }
    }
    
    

    external by Cartmanishere  3  0  1  0

    Golang client for grpc-python-golang-example

    Golang client for grpc-python-golang-example: client.go
    package main
    
    import (
        "context"
        "google.golang.org/grpc"
        "fmt"
        "bufio"
        "os"
        nltk "golang/nltk_service"
    )
    
    type GrpcClient struct {
        conn    *grpc.ClientConn
        client  nltk.KeywordServiceClient
    }
    
    const SERVER_ADDR = "127.0.0.1:6000"
    
    func InitGrpcConnection() (*GrpcClient, error) {
        conn, err := grpc.Dial(SERVER_ADDR, grpc.WithInsecure())
        if err != nil {
            return nil, err
        }
        client := nltk.NewKeywordServiceClient(conn)
        return &GrpcClient{conn, client}, nil
    }
    
    func (g *GrpcClient) MyKeywords(text string) ([]string, error) {
        req := nltk.Request{
            Text: text,
        }
    
        res, err := g.client.GetKeywords(context.Background(), &req)
        if err != nil {
            return nil, err
        }
        
        return res.Keywords, nil
    }
    
    

    external by luoheng  4  0  1  0

    Saved from https://leetcode-cn.com/problems/letter-case-permutation/submissions/

    Saved from https://leetcode-cn.com/problems/letter-case-permutation/submissions/: letterCasePermutation.go
    func isAlpha(c byte) bool {
        return c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z'
    }
    
    func toLowerOrUpper(c byte) byte {
        if c >= 'a' && c <= 'z' {
            return c - 'a' + 'A'
        } else if c >= 'A' && c <= 'Z' {
            return c - 'A' + 'a'
        }
        return c
    }
    
    func letterCasePermutation(S string) []string {
        sumC := uint(0)
        for i := 0; i < len(S); i++ {
            if isAlpha(S[i]) {
                sumC++
            }
        }
        res := make([]string, 0)
        s := make([]byte, len(S))
        for i := 0; i < (1 << sumC); i++ {
            cur := uint(0)
            for j := 0; j < len(S); j++ {
                if isAlpha(S[j]) {
                    if i&(1<<cur) != 0 {
                        s[j] = toLowerOrUpper(S[j])
                    } else {
                        s[j] = S[j]
                    }
                    cur++
                } else {
                    s[j] = S[j]
                }
            }
            res = append(res, string(s))
        }
        return res
    }
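
    A quick usage sketch, assuming the functions above sit in a main package: for "a1b2" the two letters give 2^2 = 4 strings while the digits stay fixed.

    package main
    
    import "fmt"
    
    func main() {
    	// Prints [a1b2 A1b2 a1B2 A1B2]: each set bit in the mask flips
    	// the case of the corresponding letter.
    	fmt.Println(letterCasePermutation("a1b2"))
    }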
    
    

    external by ??  4  0  1  0

    websocket throughput test

    websocket throughput test: websocket_client.go
    package main
    
    import (
    	"flag"
    	"fmt"
    	"log"
    	"math/rand"
    	"net"
    	"time"
    
    	"golang.org/x/net/websocket"
    )
    
    var addr = flag.String("a", "ws://localhost:1833", "address")
    var num = flag.Int("c", 1, "connection number")
    var timeOut = flag.Int("t", 2, "timeout second")
    var msgLen = flag.Int("m", 1024, "message length")
    
    var msg []byte
    
    func main() {
    	flag.Parse()
    	msg = make([]byte, *msgLen)
    	rand.Read(msg)
    
    	startC := make(chan interface{})
    	closeC := make(chan interface{})
    	result := make(chan int64, *num)
    
    	//origin := "http://localhost:1833"
    	//url := "ws://localhost:1833"
    	for i := 0; i < *num; i++ {
    		conn, err := websocket.Dial(*addr, "", *addr)
    		if err != nil {
    			log.Fatal(err)
    		}
    		go handler(conn, startC, closeC, result)
    	}
    
    	// start
    	close(startC)
    
    	time.Sleep(time.Duration(*timeOut) * time.Second)
    	// stop
    	close(closeC)
    
    	var totalMessagesRead int64
    	for i := 0; i < *num; i++ {
    		totalMessagesRead += <-result
    	}
    
    	fmt.Println(totalMessagesRead/int64(*timeOut*1024*1024), " MiB/s throughput")
    }
    
    func handler(conn net.Conn, startC chan interface{}, closeC chan interface{}, result chan int64) {
    	var count int64
    	buf := make([]byte, 2*(*msgLen))
    	<-startC
    
    	_, e := conn.Write(msg)
    	if e != nil {
    		fmt.Println("Error to send message because of ", e.Error())
    	}
    
    	for {
    		select {
    		case <-closeC:
    			result <- count
    			conn.Close()
    			return
    		default:
    			n, err := conn.Read(buf)
    			if n > 0 {
    				count += int64(n)
    			}
    			if err != nil {
    				fmt.Print("Error to read message because of ", err)
    				result <- count
    				conn.Close()
    				return
    			}
    
    			_, err = conn.Write(buf[:n])
    			if err != nil {
    				fmt.Println("Error to send message because of ", e.Error())
    			}
    		}
    	}
    }
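
    The client above only measures throughput if the peer echoes every frame back; a minimal sketch of such a server with the same golang.org/x/net/websocket package (the :1833 port is taken from the client's default address):

    package main
    
    import (
    	"io"
    	"log"
    	"net/http"
    
    	"golang.org/x/net/websocket"
    )
    
    func main() {
    	// Echo every frame straight back until the client closes the connection.
    	http.Handle("/", websocket.Handler(func(ws *websocket.Conn) {
    		io.Copy(ws, ws)
    	}))
    	log.Fatal(http.ListenAndServe(":1833", nil))
    }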
    
    
    

    external by Asish Panda  3  0  1  0

    Redact checker

    Redact checker : orchestrator.go
    // All the orchestrator related functionalities like initializing Runner object, running orchestrator for all
    // the steps mentioned in the config, collecting prometheus metric
    package orchestrator
    
    import (
    	"context"
    	"encoding/json"
    	"fmt"
    	"io/ioutil"
    	"math/rand"
    	"strings"
    	"sync"
    
    	"github.com/razorpay/mozart/app/utils/flatten"
    
    	"github.com/razorpay/mozart/app/config/redact"
    	"github.com/razorpay/mozart/app/orchestrator/handler"
    	"github.com/razorpay/mozart/app/orchestrator/helper"
    	"github.com/razorpay/mozart/app/orchestrator/parallel"
    	"github.com/razorpay/mozart/app/orchestrator/stepGroup"
    
    	"github.com/razorpay/mozart/app/auditLog"
    	"github.com/razorpay/mozart/app/environment"
    	"github.com/spf13/cast"
    
    	"github.com/razorpay/mozart/app/config"
    	"github.com/razorpay/mozart/app/constants"
    
    	"github.com/razorpay/mozart/app/rzperrors"
    
    	"github.com/gin-gonic/gin"
    
    	"github.com/razorpay/mozart/app/datasink"
    
    	"github.com/razorpay/mozart/app/logger"
    )
    
    //StepMap ...step name to component function map and step input config
    type StepMap struct {
    	step       string
    	component  string
    	function   func(map[string]interface{}, *logger.Logger) (interface{}, rzperrors.RzpError)
    	stepInput  map[string]interface{}
    	stepRedact redact.Redact
    }
    
    //Runner ... task runner for orchestrator
    type Runner struct {
    	ctx          context.Context
    	lastCh       interface{}    // last channel in the event action
    	funcs        []StepMap      // all runnable functions with inputs
    	activateOnce sync.Once      // prevents multiple activations of runner
    	sink         *datasink.Sink // DataSink Singleton instance per run
    	err          rzperrors.RzpError
    	cfg          *config.Config
    }
    
    // Handler defines the function type used as an orchestrator stage; implementations of handler are provided by the user.
    // Each handler returns the new (output) runnable function.
    // runFn will be executed in an isolated fashion. runFn is thread-safe and may have mutable state. It lives for the
    // whole runner lifetime and usually implements a read->process->write cycle. If runFn returns a non-nil RzpError it
    // indicates a critical failure and will stop, with a canceled context, all handlers in the orchestrator.
    
    //NewStepMap ... creates a new stepmap object
    func NewStepMap(stepName string, component string, stepInput map[string]interface{}, stepRedact redact.Redact, fn func(map[string]interface{}, *logger.Logger) (interface{}, rzperrors.RzpError)) StepMap {
    	return StepMap{
    		step:       stepName,
    		component:  component,
    		function:   fn,
    		stepInput:  stepInput,
    		stepRedact: stepRedact,
    	}
    }
    
    // New creates runner object with context and common errgroup. This errgroup used to schedule and cancel all handlers.
    // options defines non-default parameters.
    func New(options ...Option) *Runner {
    
    	// default runner object parameters
    	result := Runner{
    		ctx:  context.Background(),
    		sink: datasink.CreateInstance(),
    	}
    
    	// apply options
    	for _, opt := range options {
    		opt(&result)
    	}
    
    	return &result
    }
    
    func (f *Runner) ReadConfig(filename string) rzperrors.RzpError {
    	parser := config.New(f.Context())
    	cfg, err := parser.Parse(filename)
    
    	lgr := logger.Get(f.Context())
    	if err != nil {
    		return rzperrors.NewError(rzperrors.INTERNAL_SERVER_ERROR, rzperrors.CRITICAL, err, map[string]string{
    			constants.ModuleKey: "LoadConfig Parser",
    			constants.StepName:  "ReadConfig",
    			constants.Component: "orchestrator",
    		}, "")
    	}
    
    	if err := f.ConstructSteps(cfg); err != nil {
    		if _, ok := err.Data()[constants.StepName]; !ok {
    			err.SetDataKey(constants.StepName, "ConstructSteps")
    		}
    
    		if _, ok := err.Data()[constants.Component]; !ok {
    			err.SetDataKey(constants.Component, "orchestrator")
    		}
    		return err
    	}
    
    	ginContextInterface := f.Context().Value(constants.Gin)
    
    	if ginContextInterface == nil {
    
    		e := rzperrors.NewErrorFromDesc(rzperrors.INTERNAL_SERVER_ERROR, rzperrors.CRITICAL, "Missing GIN Context in orchestrator's context", map[string]string{
    			constants.ModuleKey: "utils.LoadConfig",
    			"message":           "Missing GIN Context in orchestrator's context",
    		}, logger.OrchestratorError)
    		lgr.Error(e)
    		return e
    	}
    
    	if ginContext, ok := ginContextInterface.(*gin.Context); ok {
    
    		lgr.Log(logger.DEBUG, logger.MiscInfo, map[string]interface{}{
    			constants.ModuleKey: "utils.LoadConfig",
    			"message":           "Setting Error criteria to GIN context",
    			"errorCriteria":     fmt.Sprintf("%v", ginContextInterface),
    		})
    		ginContext.Set(constants.ErrorCriteria, cfg.ErrorCriteria)
    
    		lgr.Log(logger.DEBUG, logger.MiscInfo, map[string]interface{}{
    			constants.ModuleKey: "utils.LoadConfig",
    			"message":           "Setting Formatted Gateway Data's datasink Location to GIN context",
    			"errorCriteria":     fmt.Sprintf("%v", ginContextInterface),
    		})
    		ginContext.Set(constants.ResponseData, cfg.ResponseData)
    
    		lgr.Log(logger.DEBUG, logger.MiscInfo, map[string]interface{}{
    			constants.ModuleKey: "utils.LoadConfig",
    			"message":           "Setting Global Redact Literals to GIN context",
    			"GlobalLiterals":    fmt.Sprintf("%v", ginContextInterface),
    		})
    		ginContext.Set(constants.RedactLiterals, cfg.GlobalRedactLiterals)
    
    	} else {
    		e := rzperrors.NewErrorFromDesc(rzperrors.INTERNAL_SERVER_ERROR, rzperrors.CRITICAL, "Error while type asserting GIN context", map[string]string{
    			constants.ModuleKey: "utils.LoadConfig",
    			"message":           "Expected GIN Context but received something else",
    		}, logger.OrchestratorError)
    		lgr.Error(e)
    
    		return e
    	}
    
    	return nil
    }
    
    func (f *Runner) ConstructSteps(cfg *config.Config) rzperrors.RzpError {
    	lgr := logger.Get(f.Context())
    
    	f.cfg = cfg
    	it := config.NewConfigIterator(f.Context(), cfg)
    
    	if err := it.IsValidConfig(); err != nil {
    		return err
    	}
    	for _, step := range cfg.Steps {
    		lgr.Log(logger.DEBUG, logger.MiscInfo, map[string]interface{}{
    			constants.ModuleKey: "orchestrator.ConstructSteps",
    			"message":           fmt.Sprintf("Step:%s", step.Name),
    		})
    	}
    
    	currentStep := it.CurrentStep()
    	if err := f.addComponent(currentStep); err != nil {
    		return err
    	}
    
    	for it.HasNext() == true {
    		currentStep := it.NextStep()
    		if err := f.addComponent(currentStep); err != nil {
    			return err
    		}
    	}
    
    	return nil
    }
    
    // Add ... adds one or more handlers/components. Each will be linked to the previous one and order of handlers defines sequence of
    // steps in the orchestrator. The config reader will use this to add the list of steps for the orchestrator run
    func (f *Runner) Add(stepName string, component string, handler handler.Handler, stepInput map[string]interface{}, stepRedact redact.Redact) *Runner {
    	fn := handler(f.ctx, stepName, component)
    
    	f.funcs = append(f.funcs, NewStepMap(stepName, component, stepInput, stepRedact, fn))
    	return f
    }
    
    func (f *Runner) Initialize(postData map[string]interface{}) {
    	initStep := "init"
    	lgr := logger.Get(f.ctx)
    	initData := datasink.BuildEmptySinkObject()
    	dataCopy := flatten.Flatten(postData)
    
    	isRedacted := false
    
    	for k := range logger.SensitiveFieldsMap {
    		if _, ok := dataCopy[k]; ok {
    			dataCopy[k] = "***********"
    			isRedacted = true
    		}
    	}
    
    	initData[datasink.InputKey] = postData
    	initData[datasink.OutputKey] = postData
    	initData[datasink.ErrorKey] = nil
    	if isRedacted == true {
    		initData[datasink.RedactedInputKey] = dataCopy
    		initData[datasink.RedactedOuputKey] = dataCopy
    		initData[datasink.RedactedErrorKey] = nil
    	}
    
    	f.DataSink().UpdateDataSink(initStep, initData, lgr)
    }
    
    // Go activates Runner. Should be called exactly once after all handlers added, next calls ignored.
    func (f *Runner) Go() *Runner {
    	envObj := environment.GetInstance()
    	auditFlag := envObj.Get().AuditFlag
    
    	requestId, _ := helper.GetRequestIdAndTraceId(f.Context())
    	response := OrchestratorOutput{}
    
    	// if the response data config is being loaded during the execution of the parallelizer
    	// it shall check the config and load the data from there
    	// for the normal flow it shall load from the ctx
    	if f.cfg != nil && f.cfg.ResponseData != nil {
    		response.ResponseData = f.cfg.ResponseData
    	} else {
    		response.ResponseData = f.ctx.Value(constants.ResponseData)
    	}
    
    	// if the response data config is being loaded during the execution of the parallelizer
    	// it shall check the config and load the data from there
    	// for the normal flow it shall load from the ctx
    	if f.cfg != nil && f.cfg.ErrorCriteria != nil {
    		response.ErrorCriteria = f.cfg.ErrorCriteria
    	} else {
    		response.ErrorCriteria = f.ctx.Value(constants.ErrorCriteria)
    	}
    
    	var globalLiteral interface{}
    
    	ginContext := f.Context().Value(constants.Gin)
    
    	if ginCtx, ok := ginContext.(*gin.Context); ok && ginCtx != nil {
    		globalLiteral, _ = ginCtx.Get(constants.RedactLiterals)
    	}
    
    	lgr := logger.Get(f.ctx)
    	var globalRedactLiteral redact.Redact
    	var ok bool
    	if globalRedactLiteral, ok = globalLiteral.(redact.Redact); !ok {
    		globalRedactLiteral = redact.Redact{nil, nil, nil}
    	}
    
    	//f.activateOnce.Do(func() {
    	for _, sMap := range f.funcs {
    		if f.err != nil {
    			break
    		}
    		fn := sMap.function
    		if fn != nil {
    			lgr.Log(logger.INFO, logger.StepStart, map[string]interface{}{
    				"step": sMap.step,
    			})
    
    			resolvedStepInput := make(map[string]interface{})
    			helper.SearchInputForSinkKeyAndReplaceValue(sMap.stepInput, f.DataSink(), resolvedStepInput, false)
    			lgr.Redact = getResolvedRedactData(sMap.stepRedact, globalRedactLiteral, f)
    			sinkObj := f.sink.BuildStepData(resolvedStepInput)
    
    			f.sink.UpdateDataSink(sMap.step, sinkObj, lgr)
    
    			if output, err := fn(resolvedStepInput, lgr); err != nil {
    				lgr.Log(logger.WARN, logger.StepError, map[string]interface{}{
    					"step": sMap.step,
    				})
    
    				if _, ok := err.Data()[constants.StepName]; !ok {
    					err.SetDataKey(constants.StepName, sMap.step)
    				}
    
    				if _, ok := err.Data()[constants.Component]; !ok {
    					err.SetDataKey(constants.Component, sMap.component)
    				}
    
    				// We are doing this specially for express. In express, even if validation fails, we need to format a proper response
    				// and send it back to the calling system. To support that, the validator returns custom formatted data.
    				// On validation failure, validator will return both data and error.
    				// Returns error to break the flow execution.
    				// Returns formattedValue to be returned to the calling system.
    				// DO NOT ADD ANY MORE LOGIC HERE. THIS IS A HACK. This will move away with reading config as a tree
    				if sMap.component == "Validator" && err.ErrorCode() == rzperrors.VALIDATION_ERROR_WITH_RESPONSE {
    					response.Response = output
    					err = rzperrors.NewError(rzperrors.VALIDATION_ERROR, err.Criticality(), err, err.Data(), err.GetTraceCode())
    				} else {
    					response.Response = err
    				}
    
    				f.err = err
    				sinkObj[datasink.OutputKey] = output
    				sinkObj[datasink.ErrorKey] = err
    				f.sink.UpdateDataSink(sMap.step, sinkObj, lgr)
    				// break will ensure the orchestrator will not move forward, if we encounter an error in a component
    				break
    			} else {
    
    				switch output.(type) {
    				case parallel.ParallelStruct:
    					output, err = f.parallel(output.(parallel.ParallelStruct))
    				case stepGroup.StepGroupStruct:
    					output, err = f.stepGroup(output.(stepGroup.StepGroupStruct))
    				default:
    					//Normal step execution
    				}
    
    				if err != nil {
    					lgr.Log(logger.WARN, logger.StepError, map[string]interface{}{
    						"step": sMap.step,
    					})
    
    					if _, ok := err.Data()[constants.StepName]; !ok {
    						err.SetDataKey(constants.StepName, sMap.step)
    					}
    
    					if _, ok := err.Data()[constants.Component]; !ok {
    						err.SetDataKey(constants.Component, sMap.component)
    					}
    
    					response.Response = err
    					f.err = err
    					sinkObj[datasink.OutputKey] = output
    					sinkObj[datasink.ErrorKey] = err
    					f.sink.UpdateDataSink(sMap.step, sinkObj, lgr)
    					break
    				}
    
    				lgr.Log(logger.INFO, logger.StepSuccess, map[string]interface{}{
    					"step": sMap.step,
    				})
    
    				response.Response = output
    				sinkObj[datasink.OutputKey] = output
    				sinkObj[datasink.ErrorKey] = err
    				f.sink.UpdateDataSink(sMap.step, sinkObj, lgr)
    			}
    		}
    	}
    
    	if auditFlag == true {
    		auditSinkString, sinkError := datasink.GetJSON(f.sink.Data)
    
    		if sinkError != nil {
    			lgr.Error(sinkError)
    		} else {
    			auditRecord := auditLog.GetAuditRecordForRequestId(requestId)
    
    			sinkMap := make(map[string]interface{})
    			sinkUnmarshalErr := json.Unmarshal([]byte(auditSinkString), &sinkMap)
    			if sinkUnmarshalErr != nil {
    				e := rzperrors.NewError(rzperrors.INTERNAL_SERVER_ERROR, rzperrors.CRITICAL, sinkUnmarshalErr,
    					map[string]string{
    						"description": "Unable to unmarshal datasink for audit log",
    						"error":       sinkUnmarshalErr.Error(),
    					}, logger.AuditError)
    				lgr.Error(e)
    			} else {
    				if auditRecord != nil && auditRecord.DataSink != nil {
    					auditRecord.DataSink.Data = sinkMap
    				} else {
    					e := rzperrors.NewErrorFromDesc(rzperrors.INTERNAL_SERVER_ERROR, rzperrors.CRITICAL, "Unable to retrieve datasink for auditLog",
    						map[string]string{"step": "Audit log datasink update"}, logger.StepError)
    					lgr.Error(e)
    				}
    			}
    		}
    	}
    
    	response.ErrorCriteria = helper.ResolveValues(response.ErrorCriteria, f.DataSink(), false)
    	response.ResponseData = helper.ResolveValues(response.ResponseData, f.DataSink(), false)
    	if f.cfg != nil {
    		if bs := f.cfg.BulkSource; bs != "" {
    			f.AddContext(constants.BulkSource, bs)
    		}
    	}
    	d, _ := datasink.GetJSON(f.DataSink().Data)
    	if strings.Contains(d, "build_amex_url") {
    		random_string := randSeq(7)
    		file_name := []string{"/Users/asishpanda/test_files/", random_string, ".txt"}
    		_ = ioutil.WriteFile(strings.Join(file_name, ""), []byte(d), 0644)
    
    	}
    
    	fmt.Println(datasink.GetJSON(f.DataSink().Data))
    
    	f.lastCh = response
    
    	return f
    }
    
    
    func randSeq(n int) string {
    	var letters = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
    	b := make([]rune, n)
    	for i := range b {
    		b[i] = letters[rand.Intn(len(letters))]
    	}
    	return string(b)
    }
    
    // stepGroup initiates a new instance of runner for the sequence of steps specified in
    // the step group, with input from Input field of stepGrouper component output struct
    func (f *Runner) stepGroup(data stepGroup.StepGroupStruct) (interface{}, rzperrors.RzpError) {
    	lgr := logger.Get(f.ctx)
    
    	steps, ok := f.cfg.StepGroups[data.Name]
    	if !ok {
    		msg := fmt.Sprintf("No step group found with the id [%s]", data.Name)
    		e := rzperrors.NewErrorFromDesc(rzperrors.INTERNAL_SERVER_ERROR, rzperrors.CRITICAL, msg,
    			map[string]string{
    				"message": msg,
    				"module":  "finding and running step group",
    			}, logger.StepGroupError)
    		lgr.Error(e)
    		return nil, e
    
    	}
    	groupConf := config.NewConfig(steps)
    	groupConf.Parallel = f.cfg.Parallel
    	groupConf.StepGroups = f.cfg.StepGroups
    
    	var (
    		out, outputData interface{}
    		err             rzperrors.RzpError
    		sink            datasink.Sink
    	)
    
    	runStepGroup(f.ctx, groupConf, data.Input, nil, &out, &sink, &err)
    	if err != nil {
    		return map[string]interface{}{
    			"sink":   sink.Data,
    			"output": nil,
    		}, err
    	}
    
    	orchestratorOp, ok := out.(OrchestratorOutput)
    	if !ok {
    		e := rzperrors.NewErrorFromDesc(rzperrors.INTERNAL_SERVER_ERROR, rzperrors.CRITICAL,
    			"Step Group output is not of type OrchestratorOutput", map[string]string{
    				"module": "step group output decode",
    			}, logger.StepGroupError)
    		lgr.Error(e)
    		return nil, e
    	}
    	switch orchestratorOp.Response.(type) {
    	case rzperrors.RzpError:
    		err = orchestratorOp.Response.(rzperrors.RzpError)
    	default:
    		outputData = orchestratorOp.Response
    	}
    	return map[string]interface{}{
    		"sink":   sink.Data,
    		"output": outputData,
    	}, err
    
    }
    
    // parallel gets the slice of parallel-struct from parallelizer component,
    // initializes separate Runner for each section from slice, and then runs them all
    // in parallel
    func (f *Runner) parallel(input parallel.ParallelStruct) (interface{}, rzperrors.RzpError) {
    	lgr := logger.Get(f.ctx)
    
    	out := make([]interface{}, len(input.Sections))
    	datasinkSlice := make([]datasink.Sink, len(input.Sections))
    	var wg sync.WaitGroup
    
    	// Iterate over list of sections and run them in parallel.
    	// Every section has name & input.
    	// Section's name is the section to execute from config.
    	// In case no section is found in config with given name, error is returned.
    	for _, section := range input.Sections {
    		if section.Name == "" {
    			return nil, rzperrors.NewParallelizerErrorFromDesc("section empty", nil, "")
    		}
    
    		block, ok := f.cfg.Parallel[section.Name]
    		if !ok {
    			lgr.Log(logger.INFO, logger.ParallelizerError, map[string]interface{}{
    				"message":      "couldn't find section",
    				"section_name": section.Name,
    			})
    
    			return nil, rzperrors.NewParallelizerErrorFromDesc("section not found in config",
    				map[string]string{"section_name": section.Name}, logger.ParallelizerError)
    		}
    
    		_, ok = f.cfg.StepGroups[block.StepGroup]
    		if !ok {
    			msg := "step group for the section not found in config"
    			lgr.Log(logger.INFO, logger.ParallelizerError, map[string]interface{}{
    				"message":      msg,
    				"section_name": section.Name,
    			})
    
    			return nil, rzperrors.NewParallelizerErrorFromDesc(msg,
    				map[string]string{"section_name": section.Name}, logger.ParallelizerError)
    		}
    	}
    
    	// stores errors if reading config causes an error
    	configError := make([]rzperrors.RzpError, len(input.Sections))
    
    	for i, section := range input.Sections {
    		parallelBlock := f.cfg.Parallel[section.Name]
    		steps := f.cfg.StepGroups[parallelBlock.StepGroup]
    		parallCfg := config.NewConfig(steps)
    		parallCfg.ErrorCriteria = parallelBlock.ErrorCriteria
    		parallCfg.ResponseData = parallelBlock.ResponseData
    
    		wg.Add(1)
    		go runStepGroup(f.ctx, parallCfg, section.Input, &wg, &out[i], &datasinkSlice[i], &configError[i])
    	}
    
    	wg.Wait()
    	return merge(f.ctx, out, datasinkSlice, configError, f.cfg.Parallel)
    }
    
    // Wait for completion, returns error if any happened in handlers.
    func (f *Runner) Wait() rzperrors.RzpError {
    	//return f.group.Wait()
    	return f.err
    }
    
    // Channel returns last (final) channel in runner. Usually consumers don't need
    // this channel, but can be used to return some final result(s)
    func (f *Runner) Channel() interface{} {
    	return f.lastCh
    }
    
    // Funcs ... returns all the functions of the runner
    func (f *Runner) Funcs() []StepMap {
    	return f.funcs
    }
    
    //Context ... returns the current context
    func (f *Runner) Context() context.Context {
    	return f.ctx
    }
    
    //DataSink ... returns the datasink object
    func (f *Runner) DataSink() *datasink.Sink {
    	return f.sink
    }
    
    //GetContextValue ... gets the orchestrator's context value like gateway name, gateway action etc
    func (f *Runner) GetContextValue(contextKey string) string {
    	cValue, ok := f.ctx.Value(contextKey).(string)
    	if !ok {
    		return ""
    	}
    	return cValue
    }
    
    //AddContext ... adds given context to current context
    func (f *Runner) AddContext(key string, value interface{}) {
    	ctx := WithContext(f.ctx)
    	f.ctx = context.WithValue(ctx, key, value)
    }
    
    func (f *Runner) addComponent(currentStep config.Step) rzperrors.RzpError {
    	lgr := logger.Get(f.Context())
    
    	lgr.Log(logger.DEBUG, logger.MiscInfo, map[string]interface{}{
    		"method":  "utils.addComponent",
    		"message": fmt.Sprintf("Adding Component:%s", currentStep.Name),
    	})
    
    	hndlr, err := config.GetHandler(f.Context(), currentStep)
    	if err != nil {
    		return err
    	}
    
    	f.Add(currentStep.Name, currentStep.Component, hndlr(), currentStep.Input, currentStep.RedactBlock)
    	lgr.Log(logger.DEBUG, logger.MiscInfo, map[string]interface{}{
    		"method":  "utils.addComponent",
    		"message": fmt.Sprintf("Adding Custom Component:%v", currentStep.Component),
    	})
    
    	lgr.Log(logger.DEBUG, logger.MiscInfo, map[string]interface{}{
    		"method":  "utils.addComponent",
    		"message": fmt.Sprintf("Total Functions Added:%v", len(f.Funcs())),
    	})
    
    	return nil
    }
    
    // runStepGroup creates a new runner for the given input & config and runs all the steps for
    // the specified config. After executing all the steps, it copies the output to the out interface;
    // if there was any error during execution, runStepGroup sets the error in err.
    // If it was provided a non-nil waitGroup, after completing all the steps,
    // it decrements the waitgroup to signal its completion.
    func runStepGroup(ctx context.Context, cfg *config.Config, input map[string]interface{},
    	wg *sync.WaitGroup, out *interface{}, sink *datasink.Sink, cfgErr *rzperrors.RzpError) {
    
    	if wg != nil {
    		defer wg.Done()
    	}
    
    	newCtx := WithContext(ctx)
    	runner := New(Context(newCtx))
    	runner.Initialize(input)
    	if err := runner.ConstructSteps(cfg); err != nil {
    		*cfgErr = err
    		return
    	}
    	runner.Go()
    	*out = runner.Channel()
    	*sink = *runner.sink
    	return
    }
    
    //merge merges the input from different interfaces into one interface.
    // If a step in the config is set to exitOnFailure we set the error
    // that occurred in a parallel step as the error response from the parallel step.
    func merge(ctx context.Context, sectionOutputs []interface{}, sinkSlice []datasink.Sink, configError []rzperrors.RzpError, configs map[string]config.ParallelBlock) (interface{}, rzperrors.RzpError) {
    	var err rzperrors.RzpError
    	for i, confErr := range configError {
    		if confErr != nil {
    			err = rzperrors.NewParallelizerErrorFromDesc("Failed parallel step, find list of errors", map[string]string{}, logger.ParallelizerError)
    			nestedErrors := err.NestedError()
    			nestedErrors[fmt.Sprint("nestedError:", i)] = confErr
    		}
    	}
    
    	// if the parallel block is set to exitOnFailure then we set the error from a step
    	// as the error of the entire parallel component
    	for _, c := range configs {
    		if c.ExitOnFailure {
    			for _, sectOut := range sectionOutputs {
    				if orOut, ok := sectOut.(OrchestratorOutput); ok {
    					if err, ok = orOut.Response.(rzperrors.RzpError); ok {
    						break // exit check for err
    					}
    				}
    			}
    		}
    	}
    
    	return map[string]interface{}{
    		// "output": out,
    		"output": sectionOutputs,
    		"sink":   sinkSlice,
    	}, err
    }
    
    func getResolvedRedactData(redactData redact.Redact, globalLiteral redact.Redact, f *Runner) redact.Redact {
    	redactData.Literals = append(redactData.Literals, globalLiteral.Literals...)
    
    	for index, literal := range redactData.Literals {
    		resolvedLiteral := helper.ResolveVariablesWithValues(cast.ToString(literal), f.DataSink(), false)
    		redactData.Literals[index] = resolvedLiteral
    	}
    
    	return redactData
    }
    
    
    

    external by ntk148v  6  0  1  0

    etcd `concurrency.Election` example with connection interruption detection and initial leadership status reporting

    etcd `concurrency.Election` example with connection interruption detection and initial leadership status reporting: main.go
    package main
    
    import (
    	"context"
    	"fmt"
    	"os"
    	"os/signal"
    	"syscall"
    	"time"
    
    	etcd "go.etcd.io/etcd/clientv3"
    	"go.etcd.io/etcd/clientv3/concurrency"
    	"github.com/pkg/errors"
    	log "github.com/sirupsen/logrus"
    )
    
    var (
    	electionName     = "/election-test"
    	candidateName    = "worker01"
    	resumeLeader     = true
    	TTL              = 35
    	reconnectBackOff = time.Second * 2
    	session          *concurrency.Session
    	election         *concurrency.Election
    	client           *etcd.Client
    )
    
    func main() {
    	var err error
    
    	log.SetLevel(log.InfoLevel)
    
    	client, err = etcd.New(etcd.Config{
    		Endpoints: []string{"localhost:2379"},
    	})
    	if err != nil {
    		fmt.Println(err)
    		os.Exit(1)
    	}
    
    	ctx, cancel := context.WithCancel(context.Background())
    	leaderChan, err := runElection(ctx)
    	if err != nil {
    		fmt.Println(err)
    		os.Exit(1)
    	}
    
    	c := make(chan os.Signal, 1)
    	signal.Notify(c, syscall.SIGINT)
    	go func() {
    		for {
    			select {
    			case <-c:
    				fmt.Printf("Resign election and exit\n")
    				cancel()
    			}
    		}
    	}()
    
    	for leader := range leaderChan {
    		fmt.Printf("Leader: %t\n", leader)
    	}
    
    	cancel()
    }
    
    func runElection(ctx context.Context) (<-chan bool, error) {
    	var observe <-chan etcd.GetResponse
    	var node *etcd.GetResponse
    	var errChan chan error
    	var isLeader bool
    	var err error
    
    	var leaderChan chan bool
    	setLeader := func(set bool) {
    		// Only report changes in leadership
    		if isLeader == set {
    			return
    		}
    		isLeader = set
    		leaderChan <- set
    	}
    
    	if err = newSession(ctx, 0); err != nil {
    		return nil, errors.Wrap(err, "while creating initial session")
    	}
    
    	// make the channel before launching the goroutine so the
    	// wait-for-leader loop below never sends on a nil channel
    	leaderChan = make(chan bool, 10)
    	go func() {
    		defer close(leaderChan)
    
    		for {
    			// Discover who if any, is leader of this election
    			if node, err = election.Leader(ctx); err != nil {
    				if err != concurrency.ErrElectionNoLeader {
    					log.Errorf("while determining election leader: %s", err)
    					goto reconnect
    				}
    			} else {
    				// If we are resuming an election from which we previously had leadership we
    				// have 2 options
    				// 1. Resume the leadership if the lease has not expired. This is a race as the
    				//    lease could expire in between the `Leader()` call and when we resume
    				//    observing changes to the election. If this happens we should detect the
    				//    session has expired during the observation loop.
    				// 2. Resign the leadership immediately to allow a new leader to be chosen.
    				//    This option will almost always result in transfer of leadership.
    				if string(node.Kvs[0].Value) == candidateName {
    					// If we want to resume leadership
    					if resumeLeader {
    						// Recreate our session with the old lease id
    						if err = newSession(ctx, node.Kvs[0].Lease); err != nil {
    							log.Errorf("while re-establishing session with lease: %s", err)
    							goto reconnect
    						}
    						election = concurrency.ResumeElection(session, electionName,
    							string(node.Kvs[0].Key), node.Kvs[0].CreateRevision)
    
    						// Because Campaign() only returns if the election entry doesn't exist
    						// we must skip the campaign call and go directly to observe when resuming
    						goto observe
    					} else {
    						// If resign takes longer than our TTL then lease is expired and we are no
    						// longer leader anyway.
    						ctx, cancel := context.WithTimeout(ctx, time.Duration(TTL)*time.Second)
    						election := concurrency.ResumeElection(session, electionName,
    							string(node.Kvs[0].Key), node.Kvs[0].CreateRevision)
    						err = election.Resign(ctx)
    						cancel()
    						if err != nil {
    							log.Errorf("while resigning leadership after reconnect: %s", err)
    							goto reconnect
    						}
    					}
    				}
    			}
    			// Reset leadership if we had it previously
    			setLeader(false)
    
    			// Attempt to become leader
    			errChan = make(chan error)
    			go func() {
    				// Make this a non blocking call so we can check for session close
    				errChan <- election.Campaign(ctx, candidateName)
    			}()
    
    			select {
    			case err = <-errChan:
    				if err != nil {
    					if errors.Cause(err) == context.Canceled {
    						return
    					}
    					// NOTE: Campaign currently does not return an error if session expires
    					log.Errorf("while campaigning for leader: %s", err)
    					session.Close()
    					goto reconnect
    				}
    			case <-ctx.Done():
    				session.Close()
    				return
    			case <-session.Done():
    				goto reconnect
    			}
    
    		observe:
    			// If Campaign() returned without error, we are leader
    			setLeader(true)
    
    			// Observe changes to leadership
    			observe = election.Observe(ctx)
    			for {
    				select {
    				case resp, ok := <-observe:
    					if !ok {
    						// NOTE: Observe will not close if the session expires, we must
    						// watch for session.Done()
    						session.Close()
    						goto reconnect
    					}
    					if string(resp.Kvs[0].Value) == candidateName {
    						setLeader(true)
    					} else {
    						// We are not leader
    						setLeader(false)
    						break
    					}
    				case <-ctx.Done():
    					if isLeader {
    						// If resign takes longer than our TTL then lease is expired and we are no
    						// longer leader anyway.
    						ctx, cancel := context.WithTimeout(context.Background(), time.Duration(TTL)*time.Second)
    						if err = election.Resign(ctx); err != nil {
    							log.Errorf("while resigning leadership during shutdown: %s", err)
    						}
    						cancel()
    					}
    					session.Close()
    					return
    				case <-session.Done():
    					goto reconnect
    				}
    			}
    
    		reconnect:
    			setLeader(false)
    
    			for {
    				if err = newSession(ctx, 0); err != nil {
    					if errors.Cause(err) == context.Canceled {
    						return
    					}
    					log.Errorf("while creating new session: %s", err)
    					tick := time.NewTicker(reconnectBackOff)
    					select {
    					case <-ctx.Done():
    						tick.Stop()
    						return
    					case <-tick.C:
    						tick.Stop()
    					}
    					continue
    				}
    				break
    			}
    		}
    	}()
    
    	// Wait until we have a leader before returning
    	for {
    		resp, err := election.Leader(ctx)
    		if err != nil {
    			if err != concurrency.ErrElectionNoLeader {
    				return nil, err
    			}
    			time.Sleep(time.Millisecond * 300)
    			continue
    		}
    		// If we are not leader, notify the channel
    		if string(resp.Kvs[0].Value) != candidateName {
    			leaderChan <- false
    		}
    		break
    	}
    	return leaderChan, nil
    }
    
    func newSession(ctx context.Context, id int64) error {
    	var err error
    	session, err = concurrency.NewSession(client, concurrency.WithTTL(TTL),
    		concurrency.WithContext(ctx), concurrency.WithLease(etcd.LeaseID(id)))
    	if err != nil {
    		return err
    	}
    	election = concurrency.NewElection(session, electionName)
    	return nil
    }
    
    
    