Quick Start

gRPC is an RPC framework: unlike ad-hoc JSON APIs, the service contract is centralised in .proto files and client/server code is auto-generated from it.

The idea is that microservices share the same set of .proto files; each one generates its own code from them and calls the server APIs through the generated stubs.

These .proto files are typically stored somewhere centralised. Code generation is handled by each client however they want.

As a result, both client and server will share the same interface and everyone is automatically updated.

To get started

We create some proto files:

├── client.py
├── Makefile
├── protos
│   ├── requests
│   │   └── get_machine_request.proto
│   ├── virtual_machine.proto
│   └── vm_service.proto
└── server.py

vm_service.proto

syntax = "proto3";

import "virtual_machine.proto";
import "requests/get_machine_request.proto";
import "google/protobuf/empty.proto";

// RPC surface for querying virtual machines.
service VirtualMachineService{
    // Unary RPC: look up a single machine by id.
    rpc GetMachine(GetMachineRequest) returns (VirtualMachine);
    // Server-streaming RPC: emit every known machine, one message at a time.
    rpc ListMachine(google.protobuf.Empty) returns (stream VirtualMachine);
}

virtual_machine.proto

syntax = "proto3";

// A virtual machine record returned by VirtualMachineService.
message VirtualMachine {
    int32 id = 1;          // numeric machine identifier
    string host_name = 2;  // hostname of the machine
}

requests/get_machine_request.proto

syntax = "proto3";

// Request payload for VirtualMachineService.GetMachine.
message GetMachineRequest {
    int32 id = 1;  // id of the machine to fetch
}

After this, we can create a Makefile to shortcut running some commands:

Makefile

# Collect every .proto file under protos/, recursively.
PROTO_FILES := $(shell find protos -type f -name '*.proto')

# Entry point: wipe the old output, then regenerate everything.
compile_protos: clean compile

# Generate Python messages (_pb2.py), type stubs (.pyi) and gRPC stubs
# (_pb2_grpc.py) into ./generated, using protos/ as the import root (-I).
compile:
	@mkdir -p generated
	@echo "Compiling files: $(PROTO_FILES)"
	@python -m grpc_tools.protoc -I./protos --python_out=generated --pyi_out=generated --grpc_python_out=generated $(PROTO_FILES)

# Remove all generated code.
clean:
	rm -rf generated

This will find all the *.proto files and compile them all in a single protoc invocation.

Now we can run make, which will generate the Python files in the generated folder.

Create client and server

Once we generate the code, it's time to start programming our server and client

Server.py

import sys
sys.path.append("generated")

from generated import vm_service_pb2, vm_service_pb2_grpc, virtual_machine_pb2
import grpc
import uuid

from concurrent import futures


class VirtualMachineService(vm_service_pb2_grpc.VirtualMachineServiceServicer):
    """Server-side implementation of the VirtualMachineService RPCs.

    NOTE: the base class must be the generated ``VirtualMachineServiceServicer``,
    not ``vm_service_pb2_grpc.VirtualMachineService`` (that generated class is a
    client-side helper, not a servicer base).
    """

    def GetMachine(self, request, context):
        """Unary RPC: return a VirtualMachine echoing the requested id.

        host_name is faked with a random UUID since there is no real backend.
        """
        return virtual_machine_pb2.VirtualMachine(
            id=request.id, host_name=str(uuid.uuid4())
        )

    def ListMachine(self, request, context):
        """Server-streaming RPC declared in vm_service.proto.

        Previously unimplemented (clients got UNIMPLEMENTED); yields a few
        fake machines so the streaming call works end to end.
        """
        for machine_id in range(3):
            yield virtual_machine_pb2.VirtualMachine(
                id=machine_id, host_name=str(uuid.uuid4())
            )


def serve():
    """Start the gRPC server on 127.0.0.1:5000 and block until termination."""
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=8))

    # Register our servicer so incoming RPCs are routed to its methods.
    vm_service_pb2_grpc.add_VirtualMachineServiceServicer_to_server(
        VirtualMachineService(),
        server
    )

    server.add_insecure_port("127.0.0.1:5000")
    server.start()

    print("Server is running port 5000")
    server.wait_for_termination()

if __name__ == "__main__":
    serve()

[!important]
Remember to do sys.path.append("generated") here to add the generated code inside the path

Client.py

import sys

sys.path.append("generated")

import grpc
from generated import vm_service_pb2_grpc
from generated.requests import get_machine_request_pb2


def run():
    """Call GetMachine on the local VirtualMachineService and print the reply."""
    grpc_channel = grpc.insecure_channel("localhost:5000")
    service_stub = vm_service_pb2_grpc.VirtualMachineServiceStub(grpc_channel)
    request = get_machine_request_pb2.GetMachineRequest(id=1)
    reply = service_stub.GetMachine(request)
    print("Client response", reply)


if __name__ == "__main__":
    run()
    

[!important]
Remember to do sys.path.append("generated") here to add the generated code inside the path